From 7fd53f1818932e417efdf121c36b6bfa2c8fa445 Mon Sep 17 00:00:00 2001 From: Flavio Fernandes Date: Mon, 8 Jul 2024 22:38:52 +0000 Subject: [PATCH 01/18] NetworkQoS CRD This is the CRD for Network QoS, based on the enhancement https://github.com/ovn-org/ovn-kubernetes/pull/4366 Signed-off-by: Flavio Fernandes (cherry picked from commit b07f226b323189730a2bd1fa8d111d29e782beef) --- go-controller/hack/update-codegen.sh | 8 +- .../pkg/crd/networkqos/v1alpha1/doc.go | 4 + .../pkg/crd/networkqos/v1alpha1/register.go | 34 ++++ .../pkg/crd/networkqos/v1alpha1/types.go | 184 ++++++++++++++++++ .../crds/k8s.ovn.org_networkqoses.yaml | 1 + 5 files changed, 228 insertions(+), 3 deletions(-) create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/doc.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/register.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/types.go create mode 120000 helm/ovn-kubernetes/crds/k8s.ovn.org_networkqoses.yaml diff --git a/go-controller/hack/update-codegen.sh b/go-controller/hack/update-codegen.sh index ee86a1c3ec..97aadaf34f 100755 --- a/go-controller/hack/update-codegen.sh +++ b/go-controller/hack/update-codegen.sh @@ -64,7 +64,7 @@ for crd in ${crds}; do --output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/v1/apis/clientset \ --output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/clientset \ --apply-configuration-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/applyconfiguration \ - --plural-exceptions="EgressQoS:EgressQoSes,RouteAdvertisements:RouteAdvertisements" \ + --plural-exceptions="EgressQoS:EgressQoSes,RouteAdvertisements:RouteAdvertisements,NetworkQoS:NetworkQoSes" \ "$@" echo "Generating listers for $crd" @@ -72,7 +72,7 @@ for crd in ${crds}; do --go-header-file hack/boilerplate.go.txt \ --output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/v1/apis/listers \ --output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/listers \ - --plural-exceptions="EgressQoS:EgressQoSes,RouteAdvertisements:RouteAdvertisements" \ + --plural-exceptions="EgressQoS:EgressQoSes,RouteAdvertisements:RouteAdvertisements,NetworkQoS:NetworkQoSes" \ github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \ "$@" @@ -83,7 +83,7 @@ for crd in ${crds}; do --listers-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/listers \ --output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/v1/apis/informers \ --output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/informers \ - --plural-exceptions="EgressQoS:EgressQoSes,RouteAdvertisements:RouteAdvertisements" \ + --plural-exceptions="EgressQoS:EgressQoSes,RouteAdvertisements:RouteAdvertisements,NetworkQoS:NetworkQoSes" \ github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \ "$@" @@ -115,6 +115,8 @@ echo "Copying adminpolicybasedexternalroutes CRD" cp _output/crds/k8s.ovn.org_adminpolicybasedexternalroutes.yaml ../dist/templates/k8s.ovn.org_adminpolicybasedexternalroutes.yaml.j2 echo "Copying egressService CRD" cp _output/crds/k8s.ovn.org_egressservices.yaml ../dist/templates/k8s.ovn.org_egressservices.yaml.j2 +echo "Copying networkQoS CRD" +cp _output/crds/k8s.ovn.org_networkqoses.yaml ../dist/templates/k8s.ovn.org_networkqoses.yaml.j2 echo "Copying userdefinednetworks CRD" cp _output/crds/k8s.ovn.org_userdefinednetworks.yaml ../dist/templates/k8s.ovn.org_userdefinednetworks.yaml.j2 echo "Copying 
clusteruserdefinednetworks CRD" diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/doc.go b/go-controller/pkg/crd/networkqos/v1alpha1/doc.go new file mode 100644 index 0000000000..4d09443215 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/doc.go @@ -0,0 +1,4 @@ +// Package v1alpha1 contains API Schema definitions for the network v1 API group +// +k8s:deepcopy-gen=package +// +groupName=k8s.ovn.org +package v1alpha1 diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/register.go b/go-controller/pkg/crd/networkqos/v1alpha1/register.go new file mode 100644 index 0000000000..21c80fdb89 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/register.go @@ -0,0 +1,34 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "k8s.ovn.org" + SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &NetworkQoS{}, + &NetworkQoSList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/types.go b/go-controller/pkg/crd/networkqos/v1alpha1/types.go new file mode 100644 index 0000000000..0a9997e45e --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/types.go @@ -0,0 +1,184 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:path=networkqoses +// +kubebuilder::singular=networkqos +// +kubebuilder:object:root=true +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=".status.status" +// +kubebuilder:subresource:status +// NetworkQoS is a CRD that allows the user to define a DSCP marking and metering +// for pods ingress/egress traffic on its namespace to specified CIDRs, +// protocol and port. Traffic belong these pods will be checked against +// each Rule in the namespace's NetworkQoS, and if there is a match the traffic +// is marked with relevant DSCP value and enforcing specified policing +// parameters. 
diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/types.go b/go-controller/pkg/crd/networkqos/v1alpha1/types.go
new file mode 100644
index 0000000000..0a9997e45e
--- /dev/null
+++ b/go-controller/pkg/crd/networkqos/v1alpha1/types.go
@@ -0,0 +1,184 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:path=networkqoses
+// +kubebuilder:singular=networkqos
+// +kubebuilder:object:root=true
+// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=".status.status"
+// +kubebuilder:subresource:status
+// NetworkQoS is a CRD that allows the user to define a DSCP marking and metering
+// for pods ingress/egress traffic on its namespace to specified CIDRs,
+// protocol and port. Traffic belonging to these pods will be checked against
+// each Rule in the namespace's NetworkQoS, and if there is a match the traffic
+// is marked with the relevant DSCP value and the specified policing parameters
+// are enforced.
+type NetworkQoS struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   Spec   `json:"spec,omitempty"`
+	Status Status `json:"status,omitempty"`
+}
+
+// Spec defines the desired state of NetworkQoS
+type Spec struct {
+	// netAttachRefs points to a list of objects which could be either NAD, UDN, or Cluster UDN.
+	// In the case of NAD, the network type could be of type Layer-3, Layer-2, or Localnet.
+	// If not specified, then the primary network of the selected Pods will be chosen.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="netAttachRefs is immutable"
+	NetworkAttachmentRefs []corev1.ObjectReference `json:"netAttachRefs,omitempty"`
+
+	// podSelector applies the NetworkQoS rule only to the pods in the namespace whose label
+	// matches this definition. This field is optional, and in case it is not set
+	// results in the rule being applied to all pods in the namespace.
+	// +optional
+	PodSelector metav1.LabelSelector `json:"podSelector,omitempty"`
+
+	// priority is a value from 0 to 100 and represents the NetworkQoS' priority.
+	// QoSes with numerically higher priority take precedence over those with lower.
+	// +kubebuilder:validation:Maximum:=100
+	// +kubebuilder:validation:Minimum:=0
+	Priority int `json:"priority"`
+
+	// egress is a collection of Egress NetworkQoS rule objects. A total of 20 rules will
+	// be allowed in each NetworkQoS instance. The relative precedence of egress rules
+	// within a single NetworkQoS object (all of which share the priority) will be
+	// determined by the order in which the rule is written. Thus, a rule that appears
+	// first in the list of egress rules would take the lower precedence.
+	Egress []Rule `json:"egress"`
+}
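To make the spec shape concrete, a hypothetical manifest exercising the fields above and the rule fields defined below (all names and values are illustrative, not taken from the patch):

    apiVersion: k8s.ovn.org/v1alpha1
    kind: NetworkQoS
    metadata:
      name: prioritize-db-traffic   # hypothetical name
      namespace: default
    spec:
      priority: 10
      podSelector:
        matchLabels:
          app: client
      egress:
      - dscp: 46
        classifier:
          to:
          - ipBlock:
              cidr: 10.0.0.0/24
          port:
            protocol: TCP
            port: 5432
        bandwidth:
          rate: 10000   # kbps
          burst: 1000   # kilobits

Per the field comments, DSCP 46 is applied to matching egress traffic, and the bandwidth block polices it at 10 Mbps (rate is expressed in kbps).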
+
+type Rule struct {
+	// dscp marking value for matching pods' traffic.
+	// +kubebuilder:validation:Maximum:=63
+	// +kubebuilder:validation:Minimum:=0
+	DSCP int `json:"dscp"`
+
+	// classifier The classifier on which packets should match
+	// to apply the NetworkQoS Rule.
+	// This field is optional, and in case it is not set the rule is applied
+	// to all egress traffic regardless of the destination.
+	// +optional
+	Classifier Classifier `json:"classifier"`
+
+	// +optional
+	Bandwidth Bandwidth `json:"bandwidth"`
+}
+
+type Classifier struct {
+	// +optional
+	To []Destination `json:"to"`
+
+	// +optional
+	Port Port `json:"port"`
+}
+
+// Bandwidth controls the maximum rate of traffic that can be sent
+// or received on the matching packets.
+type Bandwidth struct {
+	// rate The value of rate limit in kbps. Traffic over the limit
+	// will be dropped.
+	// +kubebuilder:validation:Minimum:=1
+	// +kubebuilder:validation:Maximum:=4294967295
+	// +optional
+	Rate uint32 `json:"rate"`
+
+	// burst The value of burst rate limit in kilobits.
+	// This also needs rate to be specified.
+	// +kubebuilder:validation:Minimum:=1
+	// +kubebuilder:validation:Maximum:=4294967295
+	// +optional
+	Burst uint32 `json:"burst"`
+}
+
+// Port specifies destination protocol and port on which NetworkQoS
+// rule is applied
+type Port struct {
+	// protocol (TCP, UDP, or SCTP) that the traffic must match.
+	// +kubebuilder:validation:Pattern=^(TCP|UDP|SCTP)$
+	// +optional
+	Protocol string `json:"protocol"`
+
+	// port that the traffic must match
+	// +kubebuilder:validation:Minimum:=1
+	// +kubebuilder:validation:Maximum:=65535
+	// +optional
+	Port int32 `json:"port"`
+}
+
+// Destination describes a peer to apply NetworkQoS configuration for the outgoing traffic.
+// Only certain combinations of fields are allowed.
+// +kubebuilder:validation:XValidation:rule="!(has(self.ipBlock) && (has(self.podSelector) || has(self.namespaceSelector)))",message="Can't specify both podSelector/namespaceSelector and ipBlock"
+type Destination struct {
+	// podSelector is a label selector which selects pods. This field follows standard label
+	// selector semantics; if present but empty, it selects all pods.
+	//
+	// If namespaceSelector is also set, then the NetworkQoS as a whole selects
+	// the pods matching podSelector in the Namespaces selected by NamespaceSelector.
+	// Otherwise it selects the pods matching podSelector in the NetworkQoS's own namespace.
+	// +optional
+	PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"`
+
+	// namespaceSelector selects namespaces using cluster-scoped labels. This field follows
+	// standard label selector semantics; if present but empty, it selects all namespaces.
+	//
+	// If podSelector is also set, then the NetworkQoS as a whole selects
+	// the pods matching podSelector in the namespaces selected by namespaceSelector.
+	// Otherwise it selects all pods in the namespaces selected by namespaceSelector.
+	// +optional
+	NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"`
+
+	// ipBlock defines policy on a particular IPBlock. If this field is set then
+	// neither of the other fields can be.
+	// +optional
+	IPBlock *networkingv1.IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"`
+}
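A hypothetical classifier "to" fragment showing the peer combinations the CEL rule above permits; selectors may be combined with each other, but not with ipBlock:

    to:
    - podSelector:
        matchLabels:
          app: backend
      namespaceSelector:
        matchLabels:
          kubernetes.io/metadata.name: prod
    - ipBlock:
        cidr: 192.168.0.0/16
        except:
        - 192.168.1.0/24

An entry mixing ipBlock with podSelector or namespaceSelector is rejected with "Can't specify both podSelector/namespaceSelector and ipBlock".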
+
+// Status defines the observed state of NetworkQoS
+type Status struct {
+	// A concise indication of whether the NetworkQoS resource was applied successfully.
+	// +optional
+	Status string `json:"status,omitempty"`
+
+	// An array of condition objects indicating details about the status of the NetworkQoS object.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:path=networkqoses
+// +kubebuilder:singular=networkqos
+// NetworkQoSList contains a list of NetworkQoS
+type NetworkQoSList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []NetworkQoS `json:"items"`
+}
diff --git a/helm/ovn-kubernetes/crds/k8s.ovn.org_networkqoses.yaml b/helm/ovn-kubernetes/crds/k8s.ovn.org_networkqoses.yaml
new file mode 120000
index 0000000000..cb314342ba
--- /dev/null
+++ b/helm/ovn-kubernetes/crds/k8s.ovn.org_networkqoses.yaml
@@ -0,0 +1 @@
+../../../dist/templates/k8s.ovn.org_networkqoses.yaml.j2
\ No newline at end of file

From ac164147e777b99ede1d9ecb1807146088f92028 Mon Sep 17 00:00:00 2001
From: Flavio Fernandes
Date: Wed, 19 Mar 2025 20:26:53 +0000
Subject: [PATCH 02/18] update-codegen.sh: be aware of crd version

Signed-off-by: Flavio Fernandes
(cherry picked from commit 8e5ad2bb6366f3b1d15168354989e816dbc26370)
---
 go-controller/hack/update-codegen.sh | 56 +++++++++++++++++-----------
 1 file changed, 35 insertions(+), 21 deletions(-)

diff --git a/go-controller/hack/update-codegen.sh b/go-controller/hack/update-codegen.sh
index 97aadaf34f..66e26375a8 100755
--- a/go-controller/hack/update-codegen.sh
+++ b/go-controller/hack/update-codegen.sh
@@ -27,6 +27,18 @@ if [[ "${builddir}" == /tmp/* ]]; then #paranoia
   rm -rf "${builddir}"
 fi
 
+# Helper function to get API version for a given CRD
+get_crd_version() {
+  case "$1" in
+    networkqos)
+      echo "v1alpha1"
+      ;;
+    *)
+      echo "v1"
+      ;;
+  esac
+}
+
 # deepcopy for types
 deepcopy-gen \
   --go-header-file hack/boilerplate.go.txt \
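As a quick illustration (not part of the patch), the helper resolves versions like so:

    get_crd_version networkqos   # prints "v1alpha1"
    get_crd_version egressqos    # prints "v1" (default arm)

Adding another non-v1 CRD later only requires a new arm in the case statement.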
@@ -39,52 +51,54 @@ for crd in ${crds}; do
 
   # for types we already generated deepcopy above which is all we need
   [ "$crd" = "types" ] && continue
 
-  echo "Generating deepcopy funcs for $crd"
+  api_version=$(get_crd_version "${crd}")
+
+  echo "Generating deepcopy funcs for $crd ($api_version)"
   deepcopy-gen \
     --go-header-file hack/boilerplate.go.txt \
     --output-file zz_generated.deepcopy.go \
     --bounding-dirs github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd \
-    github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
+    github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/${api_version} \
     "$@"
 
-  echo "Generating apply configuration for $crd"
+  echo "Generating apply configuration for $crd ($api_version)"
   applyconfiguration-gen \
     --go-header-file hack/boilerplate.go.txt \
-    --output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/v1/apis/applyconfiguration \
-    --output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/applyconfiguration \
-    github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
+    --output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/${api_version}/apis/applyconfiguration \
+    --output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/${api_version}/apis/applyconfiguration \
+    github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/${api_version} \
     "$@"
 
-  echo "Generating clientset for $crd"
+  echo "Generating clientset for $crd ($api_version)"
   client-gen \
    --go-header-file hack/boilerplate.go.txt \
    --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" \
    --input-base "" \
-   --input github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
-   --output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/v1/apis/clientset \
-   --output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/clientset \
-   --apply-configuration-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/applyconfiguration \
+   --input github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/${api_version} \
+   --output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/${api_version}/apis/clientset \
+   --output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/${api_version}/apis/clientset \
+   --apply-configuration-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/${api_version}/apis/applyconfiguration \
    --plural-exceptions="EgressQoS:EgressQoSes,RouteAdvertisements:RouteAdvertisements,NetworkQoS:NetworkQoSes" \
    "$@"
 
-  echo "Generating listers for $crd"
+  echo "Generating listers for $crd ($api_version)"
   lister-gen \
     --go-header-file hack/boilerplate.go.txt \
-    --output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/v1/apis/listers \
-    --output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/listers \
+    --output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/${api_version}/apis/listers \
+    --output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/${api_version}/apis/listers \
     --plural-exceptions="EgressQoS:EgressQoSes,RouteAdvertisements:RouteAdvertisements,NetworkQoS:NetworkQoSes" \
-    github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
+    github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/${api_version} \
     "$@"
 
-  echo "Generating informers for $crd"
+  echo "Generating informers for $crd ($api_version)"
   informer-gen \
     --go-header-file hack/boilerplate.go.txt \
-    --versioned-clientset-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/clientset/versioned \
-    --listers-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/listers \
-    --output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/v1/apis/informers \
-    --output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/informers \
+    --versioned-clientset-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/${api_version}/apis/clientset/versioned \
+    --listers-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/${api_version}/apis/listers \
+    --output-dir "${SCRIPT_ROOT}"/pkg/crd/$crd/${api_version}/apis/informers \
+    --output-pkg github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/${api_version}/apis/informers \
    --plural-exceptions="EgressQoS:EgressQoSes,RouteAdvertisements:RouteAdvertisements,NetworkQoS:NetworkQoSes" \
-    github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \
+    github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/${api_version} \
     "$@"
 done

From 367c2a5e401015ea20392f87a66d08c1d04925f7 Mon Sep 17 00:00:00 2001
From: Flavio Fernandes
Date: Wed, 19 Mar 2025 20:30:04 +0000
Subject: [PATCH 03/18] generated crd from update-codegen.sh

cd go-controller && ./hack/update-codegen.sh

Signed-off-by: Flavio Fernandes
(cherry picked from commit 964c66e7a282e530733be02e4e83bb05a62f27c1)
---
 .../k8s.ovn.org_networkqoses.yaml.j2          | 444 ++++++++++++++++++
 .../applyconfiguration/internal/internal.go   |  61 +++
 .../networkqos/v1alpha1/bandwidth.go          |  47 ++
.../networkqos/v1alpha1/classifier.go | 52 ++ .../networkqos/v1alpha1/destination.go | 61 +++ .../networkqos/v1alpha1/networkqos.go | 224 +++++++++ .../networkqos/v1alpha1/port.go | 47 ++ .../networkqos/v1alpha1/rule.go | 56 +++ .../networkqos/v1alpha1/spec.go | 77 +++ .../networkqos/v1alpha1/status.go | 56 +++ .../v1alpha1/apis/applyconfiguration/utils.go | 57 +++ .../apis/clientset/versioned/clientset.go | 119 +++++ .../versioned/fake/clientset_generated.go | 121 +++++ .../apis/clientset/versioned/fake/doc.go | 19 + .../apis/clientset/versioned/fake/register.go | 55 +++ .../apis/clientset/versioned/scheme/doc.go | 19 + .../clientset/versioned/scheme/register.go | 55 +++ .../typed/networkqos/v1alpha1/doc.go | 19 + .../typed/networkqos/v1alpha1/fake/doc.go | 19 + .../v1alpha1/fake/fake_networkqos.go | 50 ++ .../v1alpha1/fake/fake_networkqos_client.go | 39 ++ .../v1alpha1/generated_expansion.go | 20 + .../typed/networkqos/v1alpha1/networkqos.go | 73 +++ .../networkqos/v1alpha1/networkqos_client.go | 106 +++++ .../informers/externalversions/factory.go | 261 ++++++++++ .../informers/externalversions/generic.go | 61 +++ .../internalinterfaces/factory_interfaces.go | 39 ++ .../externalversions/networkqos/interface.go | 45 ++ .../networkqos/v1alpha1/interface.go | 44 ++ .../networkqos/v1alpha1/networkqos.go | 89 ++++ .../v1alpha1/expansion_generated.go | 26 + .../listers/networkqos/v1alpha1/networkqos.go | 69 +++ .../v1alpha1/zz_generated.deepcopy.go | 246 ++++++++++ 33 files changed, 2776 insertions(+) create mode 100644 dist/templates/k8s.ovn.org_networkqoses.yaml.j2 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/internal/internal.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/bandwidth.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/classifier.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/destination.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/networkqos.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/port.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/rule.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/spec.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/status.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/utils.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/clientset.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake/doc.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake/register.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/scheme/doc.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/scheme/register.go create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/doc.go create mode 100644 
go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake/doc.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake/fake_networkqos.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake/fake_networkqos_client.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/generated_expansion.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/networkqos.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/networkqos_client.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/factory.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/generic.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/interface.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1/interface.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1/networkqos.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1/expansion_generated.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1/networkqos.go
 create mode 100644 go-controller/pkg/crd/networkqos/v1alpha1/zz_generated.deepcopy.go

diff --git a/dist/templates/k8s.ovn.org_networkqoses.yaml.j2 b/dist/templates/k8s.ovn.org_networkqoses.yaml.j2
new file mode 100644
index 0000000000..716e4f89b5
--- /dev/null
+++ b/dist/templates/k8s.ovn.org_networkqoses.yaml.j2
@@ -0,0 +1,444 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.16.4
+  name: networkqoses.k8s.ovn.org
+spec:
+  group: k8s.ovn.org
+  names:
+    kind: NetworkQoS
+    listKind: NetworkQoSList
+    plural: networkqoses
+    singular: networkqos
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .status.status
+      name: Status
+      type: string
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: |-
+          NetworkQoS is a CRD that allows the user to define a DSCP marking and metering
+          for pods ingress/egress traffic on its namespace to specified CIDRs,
+          protocol and port. Traffic belonging to these pods will be checked against
+          each Rule in the namespace's NetworkQoS, and if there is a match the traffic
+          is marked with the relevant DSCP value and the specified policing parameters
+          are enforced.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: Spec defines the desired state of NetworkQoS
+            properties:
+              egress:
+                description: |-
+                  egress is a collection of Egress NetworkQoS rule objects. A total of 20 rules will
+                  be allowed in each NetworkQoS instance. The relative precedence of egress rules
+                  within a single NetworkQoS object (all of which share the priority) will be
+                  determined by the order in which the rule is written. Thus, a rule that appears
+                  first in the list of egress rules would take the lower precedence.
+                items:
+                  properties:
+                    bandwidth:
+                      description: |-
+                        Bandwidth controls the maximum rate of traffic that can be sent
+                        or received on the matching packets.
+                      properties:
+                        burst:
+                          description: |-
+                            burst The value of burst rate limit in kilobits.
+                            This also needs rate to be specified.
+                          format: int32
+                          maximum: 4294967295
+                          minimum: 1
+                          type: integer
+                        rate:
+                          description: |-
+                            rate The value of rate limit in kbps. Traffic over the limit
+                            will be dropped.
+                          format: int32
+                          maximum: 4294967295
+                          minimum: 1
+                          type: integer
+                      type: object
+                    classifier:
+                      description: |-
+                        classifier The classifier on which packets should match
+                        to apply the NetworkQoS Rule.
+                        This field is optional, and in case it is not set the rule is applied
+                        to all egress traffic regardless of the destination.
+                      properties:
+                        port:
+                          description: |-
+                            Port specifies destination protocol and port on which NetworkQoS
+                            rule is applied
+                          properties:
+                            port:
+                              description: port that the traffic must match
+                              format: int32
+                              maximum: 65535
+                              minimum: 1
+                              type: integer
+                            protocol:
+                              description: protocol (TCP, UDP, or SCTP) that the
+                                traffic must match.
+                              pattern: ^(TCP|UDP|SCTP)$
+                              type: string
+                          type: object
+                        to:
+                          items:
+                            description: |-
+                              Destination describes a peer to apply NetworkQoS configuration for the outgoing traffic.
+                              Only certain combinations of fields are allowed.
+                            properties:
+                              ipBlock:
+                                description: |-
+                                  ipBlock defines policy on a particular IPBlock. If this field is set then
+                                  neither of the other fields can be.
+                                properties:
+                                  cidr:
+                                    description: |-
+                                      cidr is a string representing the IPBlock
+                                      Valid examples are "192.168.1.0/24" or "2001:db8::/64"
+                                    type: string
+                                  except:
+                                    description: |-
+                                      except is a slice of CIDRs that should not be included within an IPBlock
+                                      Valid examples are "192.168.1.0/24" or "2001:db8::/64"
+                                      Except values will be rejected if they are outside the cidr range
+                                    items:
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                required:
+                                - cidr
+                                type: object
+                              namespaceSelector:
+                                description: |-
+                                  namespaceSelector selects namespaces using cluster-scoped labels. This field follows
+                                  standard label selector semantics; if present but empty, it selects all namespaces.
+
+                                  If podSelector is also set, then the NetworkQoS as a whole selects
+                                  the pods matching podSelector in the namespaces selected by namespaceSelector.
+                                  Otherwise it selects all pods in the namespaces selected by namespaceSelector.
+                                properties:
+                                  matchExpressions:
+                                    description: matchExpressions is a list of label
+                                      selector requirements. The requirements are
+                                      ANDed.
+                                    items:
+                                      description: |-
+                                        A label selector requirement is a selector that contains values, a key, and an operator that
+                                        relates the key and values.
+                                      properties:
+                                        key:
+                                          description: key is the label key that
+                                            the selector applies to.
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + podSelector is a label selector which selects pods. This field follows standard label + selector semantics; if present but empty, it selects all pods. + + If namespaceSelector is also set, then the NetworkQoS as a whole selects + the pods matching podSelector in the Namespaces selected by NamespaceSelector. + Otherwise it selects the pods matching podSelector in the NetworkQoS's own namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: Can't specify both podSelector/namespaceSelector + and ipBlock + rule: '!(has(self.ipBlock) && (has(self.podSelector) + || has(self.namespaceSelector)))' + type: array + type: object + dscp: + description: dscp marking value for matching pods' traffic. + maximum: 63 + minimum: 0 + type: integer + required: + - dscp + type: object + type: array + netAttachRefs: + description: |- + netAttachRefs points to a list of objects which could be either NAD, UDN, or Cluster UDN. + In the case of NAD, the network type could be of type Layer-3, Layer-2, or Localnet. + If not specified, then the primary network of the selected Pods will be chosen. 
+ items: + description: ObjectReference contains enough information to let + you inspect or modify the referred object. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-validations: + - message: netAttachRefs is immutable + rule: self == oldSelf + podSelector: + description: |- + podSelector applies the NetworkQoS rule only to the pods in the namespace whose label + matches this definition. This field is optional, and in case it is not set + results in the rule being applied to all pods in the namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+                    type: object
+                type: object
+                x-kubernetes-map-type: atomic
+              priority:
+                description: |-
+                  priority is a value from 0 to 100 and represents the NetworkQoS' priority.
+                  QoSes with numerically higher priority take precedence over those with lower.
+                maximum: 100
+                minimum: 0
+                type: integer
+            required:
+            - egress
+            - priority
+            type: object
+          status:
+            description: Status defines the observed state of NetworkQoS
+            properties:
+              conditions:
+                description: An array of condition objects indicating details about
+                  the status of the NetworkQoS object.
+                items:
+                  description: Condition contains details for one aspect of the current
+                    state of this API Resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        lastTransitionTime is the last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        message is a human readable message indicating details about the transition.
+                        This may be an empty string.
+                      maxLength: 32768
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        observedGeneration represents the .metadata.generation that the condition was set based upon.
+                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                        with respect to the current state of the instance.
+                      format: int64
+                      minimum: 0
+                      type: integer
+                    reason:
+                      description: |-
+                        reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                        Producers of specific condition types may define expected values and meanings for this field,
+                        and whether the values are considered a guaranteed API.
+                        The value should be a CamelCase string.
+                        This field may not be empty.
+                      maxLength: 1024
+                      minLength: 1
+                      pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+                      type: string
+                    status:
+                      description: status of the condition, one of True, False, Unknown.
+                      enum:
+                      - "True"
+                      - "False"
+                      - Unknown
+                      type: string
+                    type:
+                      description: type of condition in CamelCase or in foo.example.com/CamelCase.
+                      maxLength: 316
+                      pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - message
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
+                x-kubernetes-list-map-keys:
+                - type
+                x-kubernetes-list-type: map
+              status:
+                description: A concise indication of whether the NetworkQoS resource
+                  was applied successfully.
+                type: string
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
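Jumping ahead of the generated client code that follows, a hedged sketch of creating a NetworkQoS programmatically. The kubeconfig path is a placeholder, and the K8sV1alpha1() accessor assumes client-gen's usual naming for group k8s.ovn.org plus the NetworkQoSes plural exception configured in update-codegen.sh:

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/tools/clientcmd"

        nqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1"
        nqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
        if err != nil {
            panic(err)
        }
        client, err := nqosclientset.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // Minimal object: priority and one egress rule are the required fields.
        nqos := &nqosv1alpha1.NetworkQoS{
            ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
            Spec: nqosv1alpha1.Spec{
                Priority: 10,
                Egress:   []nqosv1alpha1.Rule{{DSCP: 46}},
            },
        }
        if _, err := client.K8sV1alpha1().NetworkQoSes("default").Create(context.TODO(), nqos, metav1.CreateOptions{}); err != nil {
            panic(err)
        }
    }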
diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/internal/internal.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/internal/internal.go
new file mode 100644
index 0000000000..0370ccbc97
--- /dev/null
+++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/internal/internal.go
@@ -0,0 +1,61 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package internal
+
+import (
+	fmt "fmt"
+	sync "sync"
+
+	typed "sigs.k8s.io/structured-merge-diff/v4/typed"
+)
+
+func Parser() *typed.Parser {
+	parserOnce.Do(func() {
+		var err error
+		parser, err = typed.NewParser(schemaYAML)
+		if err != nil {
+			panic(fmt.Sprintf("Failed to parse schema: %v", err))
+		}
+	})
+	return parser
+}
+
+var parserOnce sync.Once
+var parser *typed.Parser
+var schemaYAML = typed.YAMLObject(`types:
+- name: __untyped_atomic_
+  scalar: untyped
+  list:
+    elementType:
+      namedType: __untyped_atomic_
+    elementRelationship: atomic
+  map:
+    elementType:
+      namedType: __untyped_atomic_
+    elementRelationship: atomic
+- name: __untyped_deduced_
+  scalar: untyped
+  list:
+    elementType:
+      namedType: __untyped_atomic_
+    elementRelationship: atomic
+  map:
+    elementType:
+      namedType: __untyped_deduced_
+    elementRelationship: separable
+`)
diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/bandwidth.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/bandwidth.go
new file mode 100644
index 0000000000..3a00efc01e
--- /dev/null
+++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/bandwidth.go
@@ -0,0 +1,47 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// BandwidthApplyConfiguration represents a declarative configuration of the Bandwidth type for use
+// with apply.
+type BandwidthApplyConfiguration struct {
+	Rate  *uint32 `json:"rate,omitempty"`
+	Burst *uint32 `json:"burst,omitempty"`
+}
+
+// BandwidthApplyConfiguration constructs a declarative configuration of the Bandwidth type for use with
+// apply.
+func Bandwidth() *BandwidthApplyConfiguration {
+	return &BandwidthApplyConfiguration{}
+}
+
+// WithRate sets the Rate field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Rate field is set to the value of the last call.
+func (b *BandwidthApplyConfiguration) WithRate(value uint32) *BandwidthApplyConfiguration {
+	b.Rate = &value
+	return b
+}
+
+// WithBurst sets the Burst field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Burst field is set to the value of the last call.
+func (b *BandwidthApplyConfiguration) WithBurst(value uint32) *BandwidthApplyConfiguration { + b.Burst = &value + return b +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/classifier.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/classifier.go new file mode 100644 index 0000000000..901ece9260 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/classifier.go @@ -0,0 +1,52 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ClassifierApplyConfiguration represents a declarative configuration of the Classifier type for use +// with apply. +type ClassifierApplyConfiguration struct { + To []DestinationApplyConfiguration `json:"to,omitempty"` + Port *PortApplyConfiguration `json:"port,omitempty"` +} + +// ClassifierApplyConfiguration constructs a declarative configuration of the Classifier type for use with +// apply. +func Classifier() *ClassifierApplyConfiguration { + return &ClassifierApplyConfiguration{} +} + +// WithTo adds the given value to the To field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the To field. +func (b *ClassifierApplyConfiguration) WithTo(values ...*DestinationApplyConfiguration) *ClassifierApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithTo") + } + b.To = append(b.To, *values[i]) + } + return b +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *ClassifierApplyConfiguration) WithPort(value *PortApplyConfiguration) *ClassifierApplyConfiguration { + b.Port = value + return b +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/destination.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/destination.go new file mode 100644 index 0000000000..49f3c10101 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/destination.go @@ -0,0 +1,61 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + networkingv1 "k8s.io/api/networking/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// DestinationApplyConfiguration represents a declarative configuration of the Destination type for use +// with apply. +type DestinationApplyConfiguration struct { + PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` + NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + IPBlock *networkingv1.IPBlock `json:"ipBlock,omitempty"` +} + +// DestinationApplyConfiguration constructs a declarative configuration of the Destination type for use with +// apply. +func Destination() *DestinationApplyConfiguration { + return &DestinationApplyConfiguration{} +} + +// WithPodSelector sets the PodSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodSelector field is set to the value of the last call. +func (b *DestinationApplyConfiguration) WithPodSelector(value *v1.LabelSelectorApplyConfiguration) *DestinationApplyConfiguration { + b.PodSelector = value + return b +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *DestinationApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *DestinationApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithIPBlock sets the IPBlock field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPBlock field is set to the value of the last call. +func (b *DestinationApplyConfiguration) WithIPBlock(value networkingv1.IPBlock) *DestinationApplyConfiguration { + b.IPBlock = &value + return b +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/networkqos.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/networkqos.go new file mode 100644 index 0000000000..d1cebcab83 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/networkqos.go @@ -0,0 +1,224 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// NetworkQoSApplyConfiguration represents a declarative configuration of the NetworkQoS type for use +// with apply. 
+type NetworkQoSApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *SpecApplyConfiguration `json:"spec,omitempty"` + Status *StatusApplyConfiguration `json:"status,omitempty"` +} + +// NetworkQoS constructs a declarative configuration of the NetworkQoS type for use with +// apply. +func NetworkQoS(name, namespace string) *NetworkQoSApplyConfiguration { + b := &NetworkQoSApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("NetworkQoS") + b.WithAPIVersion("k8s.ovn.org/v1alpha1") + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *NetworkQoSApplyConfiguration) WithKind(value string) *NetworkQoSApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *NetworkQoSApplyConfiguration) WithAPIVersion(value string) *NetworkQoSApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *NetworkQoSApplyConfiguration) WithName(value string) *NetworkQoSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *NetworkQoSApplyConfiguration) WithGenerateName(value string) *NetworkQoSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *NetworkQoSApplyConfiguration) WithNamespace(value string) *NetworkQoSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *NetworkQoSApplyConfiguration) WithUID(value types.UID) *NetworkQoSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *NetworkQoSApplyConfiguration) WithResourceVersion(value string) *NetworkQoSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *NetworkQoSApplyConfiguration) WithGeneration(value int64) *NetworkQoSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *NetworkQoSApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NetworkQoSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *NetworkQoSApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NetworkQoSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *NetworkQoSApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetworkQoSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *NetworkQoSApplyConfiguration) WithLabels(entries map[string]string) *NetworkQoSApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *NetworkQoSApplyConfiguration) WithAnnotations(entries map[string]string) *NetworkQoSApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *NetworkQoSApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NetworkQoSApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *NetworkQoSApplyConfiguration) WithFinalizers(values ...string) *NetworkQoSApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *NetworkQoSApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *NetworkQoSApplyConfiguration) WithSpec(value *SpecApplyConfiguration) *NetworkQoSApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
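Taken together, these setters form a fluent builder for server-side apply. A minimal usage sketch, assuming the same package and made-up names ("my-qos", "demo", the label values); the Spec builder it uses is generated further down in this patch:

	// Repeated WithLabels calls merge their entries; a later call
	// overwrites an earlier entry with the same key.
	nq := NetworkQoS("my-qos", "demo").
		WithLabels(map[string]string{"app": "web"}).
		WithLabels(map[string]string{"tier": "gold"}).
		WithSpec(Spec().WithPriority(10))
	_ = nq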
+func (b *NetworkQoSApplyConfiguration) WithStatus(value *StatusApplyConfiguration) *NetworkQoSApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NetworkQoSApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/port.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/port.go new file mode 100644 index 0000000000..a828942403 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/port.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// PortApplyConfiguration represents a declarative configuration of the Port type for use +// with apply. +type PortApplyConfiguration struct { + Protocol *string `json:"protocol,omitempty"` + Port *int32 `json:"port,omitempty"` +} + +// PortApplyConfiguration constructs a declarative configuration of the Port type for use with +// apply. +func Port() *PortApplyConfiguration { + return &PortApplyConfiguration{} +} + +// WithProtocol sets the Protocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Protocol field is set to the value of the last call. +func (b *PortApplyConfiguration) WithProtocol(value string) *PortApplyConfiguration { + b.Protocol = &value + return b +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *PortApplyConfiguration) WithPort(value int32) *PortApplyConfiguration { + b.Port = &value + return b +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/rule.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/rule.go new file mode 100644 index 0000000000..6d332d3bb2 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/rule.go @@ -0,0 +1,56 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// RuleApplyConfiguration represents a declarative configuration of the Rule type for use +// with apply. +type RuleApplyConfiguration struct { + DSCP *int `json:"dscp,omitempty"` + Classifier *ClassifierApplyConfiguration `json:"classifier,omitempty"` + Bandwidth *BandwidthApplyConfiguration `json:"bandwidth,omitempty"` +} + +// RuleApplyConfiguration constructs a declarative configuration of the Rule type for use with +// apply. +func Rule() *RuleApplyConfiguration { + return &RuleApplyConfiguration{} +} + +// WithDSCP sets the DSCP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DSCP field is set to the value of the last call. +func (b *RuleApplyConfiguration) WithDSCP(value int) *RuleApplyConfiguration { + b.DSCP = &value + return b +} + +// WithClassifier sets the Classifier field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Classifier field is set to the value of the last call. +func (b *RuleApplyConfiguration) WithClassifier(value *ClassifierApplyConfiguration) *RuleApplyConfiguration { + b.Classifier = value + return b +} + +// WithBandwidth sets the Bandwidth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Bandwidth field is set to the value of the last call. +func (b *RuleApplyConfiguration) WithBandwidth(value *BandwidthApplyConfiguration) *RuleApplyConfiguration { + b.Bandwidth = value + return b +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/spec.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/spec.go new file mode 100644 index 0000000000..520e2a20e4 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/spec.go @@ -0,0 +1,77 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// SpecApplyConfiguration represents a declarative configuration of the Spec type for use +// with apply. +type SpecApplyConfiguration struct { + NetworkAttachmentRefs []v1.ObjectReference `json:"netAttachRefs,omitempty"` + PodSelector *metav1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` + Priority *int `json:"priority,omitempty"` + Egress []RuleApplyConfiguration `json:"egress,omitempty"` +} + +// SpecApplyConfiguration constructs a declarative configuration of the Spec type for use with +// apply. 
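The leaf builders chain the same way. A small sketch with arbitrary example values (DSCP 46, TCP port 5060); the Classifier and Bandwidth builders that Rule accepts live in sibling generated files not shown here:

	// An egress rule that would mark matched traffic with DSCP 46.
	r := Rule().WithDSCP(46)

	// A protocol/port match that a Classifier entry could carry.
	p := Port().WithProtocol("TCP").WithPort(5060)
	_, _ = r, p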
+func Spec() *SpecApplyConfiguration {
+	return &SpecApplyConfiguration{}
+}
+
+// WithNetworkAttachmentRefs adds the given value to the NetworkAttachmentRefs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the NetworkAttachmentRefs field.
+func (b *SpecApplyConfiguration) WithNetworkAttachmentRefs(values ...v1.ObjectReference) *SpecApplyConfiguration {
+	for i := range values {
+		b.NetworkAttachmentRefs = append(b.NetworkAttachmentRefs, values[i])
+	}
+	return b
+}
+
+// WithPodSelector sets the PodSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PodSelector field is set to the value of the last call.
+func (b *SpecApplyConfiguration) WithPodSelector(value *metav1.LabelSelectorApplyConfiguration) *SpecApplyConfiguration {
+	b.PodSelector = value
+	return b
+}
+
+// WithPriority sets the Priority field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Priority field is set to the value of the last call.
+func (b *SpecApplyConfiguration) WithPriority(value int) *SpecApplyConfiguration {
+	b.Priority = &value
+	return b
+}
+
+// WithEgress adds the given value to the Egress field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Egress field.
+func (b *SpecApplyConfiguration) WithEgress(values ...*RuleApplyConfiguration) *SpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithEgress")
+		}
+		b.Egress = append(b.Egress, *values[i])
+	}
+	return b
+}
diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/status.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/status.go
new file mode 100644
index 0000000000..aed88afef0
--- /dev/null
+++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/status.go
@@ -0,0 +1,56 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// StatusApplyConfiguration represents a declarative configuration of the Status type for use
+// with apply.
+type StatusApplyConfiguration struct {
+	Status     *string                          `json:"status,omitempty"`
+	Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+}
+
+// StatusApplyConfiguration constructs a declarative configuration of the Status type for use with
+// apply.
+func Status() *StatusApplyConfiguration {
+	return &StatusApplyConfiguration{}
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *StatusApplyConfiguration) WithStatus(value string) *StatusApplyConfiguration {
+	b.Status = &value
+	return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *StatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *StatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.Conditions = append(b.Conditions, *values[i])
+	}
+	return b
+}
diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/utils.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/utils.go
new file mode 100644
index 0000000000..900d00fd22
--- /dev/null
+++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/utils.go
@@ -0,0 +1,57 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package applyconfiguration
+
+import (
+	v1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1"
+	internal "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/internal"
+	networkqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	testing "k8s.io/client-go/testing"
+)
+
+// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no
+// apply configuration type exists for the given GroupVersionKind.
+func ForKind(kind schema.GroupVersionKind) interface{} { + switch kind { + // Group=k8s.ovn.org, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithKind("Bandwidth"): + return &networkqosv1alpha1.BandwidthApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("Classifier"): + return &networkqosv1alpha1.ClassifierApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("Destination"): + return &networkqosv1alpha1.DestinationApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NetworkQoS"): + return &networkqosv1alpha1.NetworkQoSApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("Port"): + return &networkqosv1alpha1.PortApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("Rule"): + return &networkqosv1alpha1.RuleApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("Spec"): + return &networkqosv1alpha1.SpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("Status"): + return &networkqosv1alpha1.StatusApplyConfiguration{} + + } + return nil +} + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/clientset.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/clientset.go new file mode 100644 index 0000000000..df6ec4df46 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/clientset.go @@ -0,0 +1,119 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + fmt "fmt" + http "net/http" + + k8sv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + K8sV1alpha1() k8sv1alpha1.K8sV1alpha1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + k8sV1alpha1 *k8sv1alpha1.K8sV1alpha1Client +} + +// K8sV1alpha1 retrieves the K8sV1alpha1Client +func (c *Clientset) K8sV1alpha1() k8sv1alpha1.K8sV1alpha1Interface { + return c.k8sV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.k8sV1alpha1, err = k8sv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.k8sV1alpha1 = k8sv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..b61e9993b1 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,121 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
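With the constructors above in place, wiring the clientset into a controller looks roughly like this. A sketch assuming an in-cluster config, a hypothetical "demo" namespace, and the usual context/metav1/rest imports; the typed NetworkQoSes interface appears later in this patch:

	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := versioned.NewForConfig(cfg) // rate limited via cfg's QPS/Burst
	if err != nil {
		panic(err)
	}
	qosList, err := cs.K8sV1alpha1().NetworkQoSes("demo").
		List(context.TODO(), metav1.ListOptions{})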
+ +package fake + +import ( + applyconfiguration "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration" + clientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" + k8sv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1" + fakek8sv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
+func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfiguration.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// K8sV1alpha1 retrieves the K8sV1alpha1Client +func (c *Clientset) K8sV1alpha1() k8sv1alpha1.K8sV1alpha1Interface { + return &fakek8sv1alpha1.FakeK8sV1alpha1{Fake: &c.Fake} +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake/doc.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..19e0028ffb --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake/doc.go @@ -0,0 +1,19 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake/register.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..38ba821acf --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake/register.go @@ -0,0 +1,55 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
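The fake clientset above is what unit tests are expected to build on. A sketch of seeding it with an object and forcing an error path through a reactor (object and namespace names are invented; testing is k8s.io/client-go/testing):

	cs := fake.NewSimpleClientset(&v1alpha1.NetworkQoS{
		ObjectMeta: metav1.ObjectMeta{Name: "qos-a", Namespace: "demo"},
	})
	got, err := cs.K8sV1alpha1().NetworkQoSes("demo").
		Get(context.TODO(), "qos-a", metav1.GetOptions{})

	// Reactors let a test inject failures:
	cs.PrependReactor("delete", "networkqoses",
		func(a testing.Action) (bool, runtime.Object, error) {
			return true, nil, fmt.Errorf("injected failure")
		})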
+ +package fake + +import ( + k8sv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + k8sv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/scheme/doc.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..1aec4021fc --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/scheme/doc.go @@ -0,0 +1,19 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/scheme/register.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..eb8b8af9d2 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/scheme/register.go @@ -0,0 +1,55 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
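As the composition comment above illustrates with kube-aggregator, the same pattern registers this group into any scheme. A sketch with assumed import aliases (kubescheme for k8s.io/client-go/kubernetes/scheme, nqosscheme for this generated scheme package):

	s := runtime.NewScheme()
	utilruntime.Must(kubescheme.AddToScheme(s)) // built-in Kubernetes types
	utilruntime.Must(nqosscheme.AddToScheme(s)) // NetworkQoS types under k8s.ovn.org/v1alpha1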
+ +package scheme + +import ( + k8sv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + k8sv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/doc.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/doc.go new file mode 100644 index 0000000000..0e375e4fc2 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/doc.go @@ -0,0 +1,19 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake/doc.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake/doc.go new file mode 100644 index 0000000000..422564f2d5 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake/doc.go @@ -0,0 +1,19 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake/fake_networkqos.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake/fake_networkqos.go new file mode 100644 index 0000000000..7ccb48963e --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake/fake_networkqos.go @@ -0,0 +1,50 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + networkqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1" + typednetworkqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1" + gentype "k8s.io/client-go/gentype" +) + +// fakeNetworkQoSes implements NetworkQoSInterface +type fakeNetworkQoSes struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.NetworkQoS, *v1alpha1.NetworkQoSList, *networkqosv1alpha1.NetworkQoSApplyConfiguration] + Fake *FakeK8sV1alpha1 +} + +func newFakeNetworkQoSes(fake *FakeK8sV1alpha1, namespace string) typednetworkqosv1alpha1.NetworkQoSInterface { + return &fakeNetworkQoSes{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.NetworkQoS, *v1alpha1.NetworkQoSList, *networkqosv1alpha1.NetworkQoSApplyConfiguration]( + fake.Fake, + namespace, + v1alpha1.SchemeGroupVersion.WithResource("networkqoses"), + v1alpha1.SchemeGroupVersion.WithKind("NetworkQoS"), + func() *v1alpha1.NetworkQoS { return &v1alpha1.NetworkQoS{} }, + func() *v1alpha1.NetworkQoSList { return &v1alpha1.NetworkQoSList{} }, + func(dst, src *v1alpha1.NetworkQoSList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.NetworkQoSList) []*v1alpha1.NetworkQoS { return gentype.ToPointerSlice(list.Items) }, + func(list *v1alpha1.NetworkQoSList, items []*v1alpha1.NetworkQoS) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake/fake_networkqos_client.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake/fake_networkqos_client.go new file mode 100644 index 0000000000..ddfcb9e789 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/fake/fake_networkqos_client.go @@ -0,0 +1,39 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeK8sV1alpha1 struct { + *testing.Fake +} + +func (c *FakeK8sV1alpha1) NetworkQoSes(namespace string) v1alpha1.NetworkQoSInterface { + return newFakeNetworkQoSes(c, namespace) +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeK8sV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/generated_expansion.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..474127f124 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/generated_expansion.go @@ -0,0 +1,20 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type NetworkQoSExpansion interface{} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/networkqos.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/networkqos.go new file mode 100644 index 0000000000..2381822dbe --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/networkqos.go @@ -0,0 +1,73 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + context "context" + + networkqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + applyconfigurationnetworkqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1" + scheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// NetworkQoSesGetter has a method to return a NetworkQoSInterface. +// A group's client should implement this interface. +type NetworkQoSesGetter interface { + NetworkQoSes(namespace string) NetworkQoSInterface +} + +// NetworkQoSInterface has methods to work with NetworkQoS resources. +type NetworkQoSInterface interface { + Create(ctx context.Context, networkQoS *networkqosv1alpha1.NetworkQoS, opts v1.CreateOptions) (*networkqosv1alpha1.NetworkQoS, error) + Update(ctx context.Context, networkQoS *networkqosv1alpha1.NetworkQoS, opts v1.UpdateOptions) (*networkqosv1alpha1.NetworkQoS, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, networkQoS *networkqosv1alpha1.NetworkQoS, opts v1.UpdateOptions) (*networkqosv1alpha1.NetworkQoS, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkqosv1alpha1.NetworkQoS, error) + List(ctx context.Context, opts v1.ListOptions) (*networkqosv1alpha1.NetworkQoSList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkqosv1alpha1.NetworkQoS, err error) + Apply(ctx context.Context, networkQoS *applyconfigurationnetworkqosv1alpha1.NetworkQoSApplyConfiguration, opts v1.ApplyOptions) (result *networkqosv1alpha1.NetworkQoS, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, networkQoS *applyconfigurationnetworkqosv1alpha1.NetworkQoSApplyConfiguration, opts v1.ApplyOptions) (result *networkqosv1alpha1.NetworkQoS, err error) + NetworkQoSExpansion +} + +// networkQoSes implements NetworkQoSInterface +type networkQoSes struct { + *gentype.ClientWithListAndApply[*networkqosv1alpha1.NetworkQoS, *networkqosv1alpha1.NetworkQoSList, *applyconfigurationnetworkqosv1alpha1.NetworkQoSApplyConfiguration] +} + +// newNetworkQoSes returns a NetworkQoSes +func newNetworkQoSes(c *K8sV1alpha1Client, namespace string) *networkQoSes { + return &networkQoSes{ + gentype.NewClientWithListAndApply[*networkqosv1alpha1.NetworkQoS, *networkqosv1alpha1.NetworkQoSList, *applyconfigurationnetworkqosv1alpha1.NetworkQoSApplyConfiguration]( + "networkqoses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *networkqosv1alpha1.NetworkQoS { return &networkqosv1alpha1.NetworkQoS{} }, + func() *networkqosv1alpha1.NetworkQoSList { return &networkqosv1alpha1.NetworkQoSList{} }, + ), + } +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/networkqos_client.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/networkqos_client.go new file mode 100644 index 0000000000..329c642e9c --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/typed/networkqos/v1alpha1/networkqos_client.go @@ -0,0 +1,106 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + http "net/http" + + networkqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + scheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type K8sV1alpha1Interface interface { + RESTClient() rest.Interface + NetworkQoSesGetter +} + +// K8sV1alpha1Client is used to interact with features provided by the k8s.ovn.org group. +type K8sV1alpha1Client struct { + restClient rest.Interface +} + +func (c *K8sV1alpha1Client) NetworkQoSes(namespace string) NetworkQoSInterface { + return newNetworkQoSes(c, namespace) +} + +// NewForConfig creates a new K8sV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*K8sV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new K8sV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
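The Apply methods pair with the apply configurations generated earlier. A sketch of a server-side apply through this typed interface, where client is a K8sV1alpha1Interface and the applyconfig alias, namespace, and field manager name are invented:

	ac := applyconfig.NetworkQoS("qos-a", "demo").
		WithSpec(applyconfig.Spec().WithPriority(10))
	applied, err := client.NetworkQoSes("demo").Apply(ctx, ac,
		metav1.ApplyOptions{FieldManager: "networkqos-controller"})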
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*K8sV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &K8sV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new K8sV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *K8sV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new K8sV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *K8sV1alpha1Client { + return &K8sV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := networkqosv1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *K8sV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/factory.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/factory.go new file mode 100644 index 0000000000..256a9cf5d1 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/factory.go @@ -0,0 +1,261 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/internalinterfaces" + networkqos "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. 
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
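These functional options are applied one by one by the constructor that follows. A brief sketch of combining them (resync period, namespace, and label selector are arbitrary; v1 is the metav1 alias imported above):

	factory := NewSharedInformerFactoryWithOptions(cs, 30*time.Minute,
		WithNamespace("demo"),
		WithTweakListOptions(func(o *v1.ListOptions) {
			o.LabelSelector = "app=web" // narrows every informer's list/watch
		}),
	)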
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
+	factory := &sharedInformerFactory{
+		client:           client,
+		namespace:        v1.NamespaceAll,
+		defaultResync:    defaultResync,
+		informers:        make(map[reflect.Type]cache.SharedIndexInformer),
+		startedInformers: make(map[reflect.Type]bool),
+		customResync:     make(map[reflect.Type]time.Duration),
+	}
+
+	// Apply all options
+	for _, opt := range options {
+		factory = opt(factory)
+	}
+
+	return factory
+}
+
+func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.shuttingDown {
+		return
+	}
+
+	for informerType, informer := range f.informers {
+		if !f.startedInformers[informerType] {
+			f.wg.Add(1)
+			// We need a new variable in each loop iteration,
+			// otherwise the goroutine would use the loop variable
+			// and that keeps changing.
+			informer := informer
+			go func() {
+				defer f.wg.Done()
+				informer.Run(stopCh)
+			}()
+			f.startedInformers[informerType] = true
+		}
+	}
+}
+
+func (f *sharedInformerFactory) Shutdown() {
+	f.lock.Lock()
+	f.shuttingDown = true
+	f.lock.Unlock()
+
+	// Will return immediately if there is nothing to wait for.
+	f.wg.Wait()
+}
+
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+	informers := func() map[reflect.Type]cache.SharedIndexInformer {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+
+		informers := map[reflect.Type]cache.SharedIndexInformer{}
+		for informerType, informer := range f.informers {
+			if f.startedInformers[informerType] {
+				informers[informerType] = informer
+			}
+		}
+		return informers
+	}()
+
+	res := map[reflect.Type]bool{}
+	for informType, informer := range informers {
+		res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+	}
+	return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	informerType := reflect.TypeOf(obj)
+	informer, exists := f.informers[informerType]
+	if exists {
+		return informer
+	}
+
+	resyncPeriod, exists := f.customResync[informerType]
+	if !exists {
+		resyncPeriod = f.defaultResync
+	}
+
+	informer = newFunc(f.client, resyncPeriod)
+	informer.SetTransform(f.transform)
+	f.informers[informerType] = informer
+
+	return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.Shutdown()    // Returns immediately if nothing was started.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done())             // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//		if !ok {
+//			fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//			return
+//		}
+//	}
+//
+//	// Informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+	internalinterfaces.SharedInformerFactory
+
+	// Start initializes all requested informers.
They are handled in goroutines + // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + + K8s() networkqos.Interface +} + +func (f *sharedInformerFactory) K8s() networkqos.Interface { + return networkqos.New(f, f.namespace, f.tweakListOptions) +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/generic.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/generic.go new file mode 100644 index 0000000000..53377ad262 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/generic.go @@ -0,0 +1,61 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + fmt "fmt" + + v1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=k8s.ovn.org, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("networkqoses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.K8s().V1alpha1().NetworkQoSes().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..b53e7b5784 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,39 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/interface.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/interface.go new file mode 100644 index 0000000000..089ac8d769 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/interface.go @@ -0,0 +1,45 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package networkqos + +import ( + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1/interface.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1/interface.go new file mode 100644 index 0000000000..8ba24801e1 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1/interface.go @@ -0,0 +1,44 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // NetworkQoSes returns a NetworkQoSInformer. + NetworkQoSes() NetworkQoSInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// NetworkQoSes returns a NetworkQoSInformer. 
+func (v *version) NetworkQoSes() NetworkQoSInformer { + return &networkQoSInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1/networkqos.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1/networkqos.go new file mode 100644 index 0000000000..a1896ab27b --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1/networkqos.go @@ -0,0 +1,89 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + time "time" + + crdnetworkqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/internalinterfaces" + networkqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// NetworkQoSInformer provides access to a shared informer and lister for +// NetworkQoSes. +type NetworkQoSInformer interface { + Informer() cache.SharedIndexInformer + Lister() networkqosv1alpha1.NetworkQoSLister +} + +type networkQoSInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewNetworkQoSInformer constructs a new informer for NetworkQoS type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewNetworkQoSInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNetworkQoSInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredNetworkQoSInformer constructs a new informer for NetworkQoS type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredNetworkQoSInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sV1alpha1().NetworkQoSes(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sV1alpha1().NetworkQoSes(namespace).Watch(context.TODO(), options) + }, + }, + &crdnetworkqosv1alpha1.NetworkQoS{}, + resyncPeriod, + indexers, + ) +} + +func (f *networkQoSInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNetworkQoSInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *networkQoSInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&crdnetworkqosv1alpha1.NetworkQoS{}, f.defaultInformer) +} + +func (f *networkQoSInformer) Lister() networkqosv1alpha1.NetworkQoSLister { + return networkqosv1alpha1.NewNetworkQoSLister(f.Informer().GetIndexer()) +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1/expansion_generated.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000..baf31eb955 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1/expansion_generated.go @@ -0,0 +1,26 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// NetworkQoSListerExpansion allows custom methods to be added to +// NetworkQoSLister. +type NetworkQoSListerExpansion interface{} + +// NetworkQoSNamespaceListerExpansion allows custom methods to be added to +// NetworkQoSNamespaceLister. +type NetworkQoSNamespaceListerExpansion interface{} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1/networkqos.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1/networkqos.go new file mode 100644 index 0000000000..acaa632fdb --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1/networkqos.go @@ -0,0 +1,69 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + networkqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// NetworkQoSLister helps list NetworkQoSes. +// All objects returned here must be treated as read-only. +type NetworkQoSLister interface { + // List lists all NetworkQoSes in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*networkqosv1alpha1.NetworkQoS, err error) + // NetworkQoSes returns an object that can list and get NetworkQoSes. + NetworkQoSes(namespace string) NetworkQoSNamespaceLister + NetworkQoSListerExpansion +} + +// networkQoSLister implements the NetworkQoSLister interface. +type networkQoSLister struct { + listers.ResourceIndexer[*networkqosv1alpha1.NetworkQoS] +} + +// NewNetworkQoSLister returns a new NetworkQoSLister. +func NewNetworkQoSLister(indexer cache.Indexer) NetworkQoSLister { + return &networkQoSLister{listers.New[*networkqosv1alpha1.NetworkQoS](indexer, networkqosv1alpha1.Resource("networkqos"))} +} + +// NetworkQoSes returns an object that can list and get NetworkQoSes. +func (s *networkQoSLister) NetworkQoSes(namespace string) NetworkQoSNamespaceLister { + return networkQoSNamespaceLister{listers.NewNamespaced[*networkqosv1alpha1.NetworkQoS](s.ResourceIndexer, namespace)} +} + +// NetworkQoSNamespaceLister helps list and get NetworkQoSes. +// All objects returned here must be treated as read-only. +type NetworkQoSNamespaceLister interface { + // List lists all NetworkQoSes in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*networkqosv1alpha1.NetworkQoS, err error) + // Get retrieves the NetworkQoS from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*networkqosv1alpha1.NetworkQoS, error) + NetworkQoSNamespaceListerExpansion +} + +// networkQoSNamespaceLister implements the NetworkQoSNamespaceLister +// interface. +type networkQoSNamespaceLister struct { + listers.ResourceIndexer[*networkqosv1alpha1.NetworkQoS] +} diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/zz_generated.deepcopy.go b/go-controller/pkg/crd/networkqos/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..407bc5a4e6 --- /dev/null +++ b/go-controller/pkg/crd/networkqos/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,246 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by deepcopy-gen. DO NOT EDIT. 
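Taken together, the generated clientset, factory, informer, and lister above are meant to be consumed as a unit. The following is a minimal sketch (not part of the patch) of that wiring, assuming a ready-made `versioned.Interface`; the package name, function name, `default` namespace, and 30-minute resync period are illustrative only:

```go
package example // illustrative wiring only

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"

	versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned"
	externalversions "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions"
)

// listNetworkQoS composes the generated factory, informer, and lister:
// start the informers, wait for the caches, then read from the local indexer.
func listNetworkQoS(ctx context.Context, client versioned.Interface) error {
	factory := externalversions.NewSharedInformerFactory(client, 30*time.Minute)
	lister := factory.K8s().V1alpha1().NetworkQoSes().Lister()

	factory.Start(ctx.Done())
	for typ, ok := range factory.WaitForCacheSync(ctx.Done()) {
		if !ok {
			return fmt.Errorf("cache failed to sync: %v", typ)
		}
	}

	// Objects returned by the lister are read-only, per the generated contract.
	items, err := lister.NetworkQoSes("default").List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("synced %d NetworkQoS objects\n", len(items))
	return nil
}
```

Creating the lister before `Start` matters: `Lister()` registers the informer with the factory, so `Start` knows to run it.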
+ +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Bandwidth) DeepCopyInto(out *Bandwidth) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bandwidth. +func (in *Bandwidth) DeepCopy() *Bandwidth { + if in == nil { + return nil + } + out := new(Bandwidth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Classifier) DeepCopyInto(out *Classifier) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = make([]Destination, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Port = in.Port + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Classifier. +func (in *Classifier) DeepCopy() *Classifier { + if in == nil { + return nil + } + out := new(Classifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Destination) DeepCopyInto(out *Destination) { + *out = *in + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.IPBlock != nil { + in, out := &in.IPBlock, &out.IPBlock + *out = new(networkingv1.IPBlock) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Destination. +func (in *Destination) DeepCopy() *Destination { + if in == nil { + return nil + } + out := new(Destination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkQoS) DeepCopyInto(out *NetworkQoS) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkQoS. +func (in *NetworkQoS) DeepCopy() *NetworkQoS { + if in == nil { + return nil + } + out := new(NetworkQoS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkQoS) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkQoSList) DeepCopyInto(out *NetworkQoSList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkQoS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkQoSList. 
+func (in *NetworkQoSList) DeepCopy() *NetworkQoSList { + if in == nil { + return nil + } + out := new(NetworkQoSList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkQoSList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Port) DeepCopyInto(out *Port) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Port. +func (in *Port) DeepCopy() *Port { + if in == nil { + return nil + } + out := new(Port) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + in.Classifier.DeepCopyInto(&out.Classifier) + out.Bandwidth = in.Bandwidth + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. +func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Spec) DeepCopyInto(out *Spec) { + *out = *in + if in.NetworkAttachmentRefs != nil { + in, out := &in.NetworkAttachmentRefs, &out.NetworkAttachmentRefs + *out = make([]corev1.ObjectReference, len(*in)) + copy(*out, *in) + } + in.PodSelector.DeepCopyInto(&out.PodSelector) + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec. +func (in *Spec) DeepCopy() *Spec { + if in == nil { + return nil + } + out := new(Spec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Status) DeepCopyInto(out *Status) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. +func (in *Status) DeepCopy() *Status { + if in == nil { + return nil + } + out := new(Status) + in.DeepCopyInto(out) + return out +} From 668bce9317d4e2a4d65e1a0f72a81091fb4baf23 Mon Sep 17 00:00:00 2001 From: Flavio Fernandes Date: Sat, 13 Jul 2024 16:52:59 +0000 Subject: [PATCH 04/18] Add flag to enable the feature This commit adds a new flag called enable-network-qos, which can be used to feature gate the Network QoS feature. 
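As a hedged sketch of how that gate is consumed (the helper below is hypothetical; only the `config.OVNKubernetesFeature.EnableNetworkQoS` field comes from this series, wired up in go-controller/pkg/config/config.go further down):

```go
package example // illustrative only

import "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"

// startNetworkQoSIfEnabled is a hypothetical helper showing the gate:
// nothing NetworkQoS-related runs unless --enable-network-qos was set.
func startNetworkQoSIfEnabled(start func() error) error {
	if !config.OVNKubernetesFeature.EnableNetworkQoS {
		return nil // feature is gated off; the controller is never constructed
	}
	return start()
}
```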
The following flag is exposed in kind scripts as -nqe or --network-qos-enable: kind.sh -nqe kind-helm.sh -nqe Alternatively, it can be enabled via an exported shell variable: OVN_NETWORK_QOS_ENABLE=true kind.sh Signed-off-by: Flavio Fernandes (cherry picked from commit 15058e582215da9abef45a3e74ce34c3c93f85d4) --- contrib/kind-helm.sh | 7 ++++ contrib/kind.sh | 8 ++++ dist/images/daemonset.sh | 15 ++++++++ dist/images/ovnkube.sh | 38 +++++++++++++++++++ dist/templates/ovnkube-control-plane.yaml.j2 | 2 + dist/templates/ovnkube-master.yaml.j2 | 2 + dist/templates/ovnkube-node.yaml.j2 | 2 + .../ovnkube-single-node-zone.yaml.j2 | 2 + .../templates/ovnkube-zone-controller.yaml.j2 | 2 + docs/features/network-qos.md | 20 ++++++++++ .../launching-ovn-kubernetes-on-kind.md | 2 + .../launching-ovn-kubernetes-with-helm.md | 9 +++++ go-controller/pkg/config/config.go | 6 +++ helm/ovn-kubernetes/README.md | 9 +++++ .../templates/ovnkube-control-plane.yaml | 2 + .../templates/deployment-ovnkube-master.yaml | 2 + .../templates/ovnkube-node-dpu-host.yaml | 2 + .../templates/ovnkube-node-dpu.yaml | 2 + .../ovnkube-node/templates/ovnkube-node.yaml | 2 + .../templates/ovnkube-single-node-zone.yaml | 2 + .../templates/ovnkube-zone-controller.yaml | 2 + .../values-multi-node-zone.yaml | 2 + helm/ovn-kubernetes/values-no-ic.yaml | 2 + .../values-single-node-zone.yaml | 2 + mkdocs.yml | 1 + 25 files changed, 145 insertions(+) create mode 100644 docs/features/network-qos.md diff --git a/contrib/kind-helm.sh b/contrib/kind-helm.sh index 462f4bbf03..a30eed592b 100755 --- a/contrib/kind-helm.sh +++ b/contrib/kind-helm.sh @@ -27,6 +27,7 @@ set_default_params() { export KIND_REMOVE_TAINT=${KIND_REMOVE_TAINT:-true} export ENABLE_MULTI_NET=${ENABLE_MULTI_NET:-false} export ENABLE_NETWORK_SEGMENTATION=${ENABLE_NETWORK_SEGMENTATION:-false} + export OVN_NETWORK_QOS_ENABLE=${OVN_NETWORK_QOS_ENABLE:-false} export KIND_NUM_WORKER=${KIND_NUM_WORKER:-2} export KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-ovn} export OVN_IMAGE=${OVN_IMAGE:-'ghcr.io/ovn-kubernetes/ovn-kubernetes/ovn-kube-ubuntu:helm'} @@ -98,6 +99,7 @@ usage() { echo " [ -ikv | --install-kubevirt ]" echo " [ -mne | --multi-network-enable ]" echo " [ -nse | --network-segmentation-enable ]" + echo " [ -nqe | --network-qos-enable ]" echo " [ -wk | --num-workers ]" echo " [ -ic | --enable-interconnect]" echo " [ -npz | --node-per-zone ]" @@ -119,6 +121,7 @@ usage() { echo "-ikv | --install-kubevirt Install kubevirt" echo "-mne | --multi-network-enable Enable multi networks. DEFAULT: Disabled" echo "-nse | --network-segmentation-enable Enable network segmentation. DEFAULT: Disabled" + echo "-nqe | --network-qos-enable Enable network QoS. DEFAULT: Disabled" echo "-ha | --ha-enabled Enable high availability. DEFAULT: HA Disabled" echo "-wk | --num-workers Number of worker nodes. 
DEFAULT: 2 workers" echo "-cn | --cluster-name Configure the kind cluster's name" @@ -165,6 +168,8 @@ parse_args() { ;; -nse | --network-segmentation-enable) ENABLE_NETWORK_SEGMENTATION=true ;; + -nqe | --network-qos-enable ) OVN_NETWORK_QOS_ENABLE=true + ;; -ha | --ha-enabled ) OVN_HA=true KIND_NUM_MASTER=3 ;; @@ -218,6 +223,7 @@ print_params() { echo "KIND_REMOVE_TAINT = $KIND_REMOVE_TAINT" echo "ENABLE_MULTI_NET = $ENABLE_MULTI_NET" echo "ENABLE_NETWORK_SEGMENTATION = $ENABLE_NETWORK_SEGMENTATION" + echo "OVN_NETWORK_QOS_ENABLE = $OVN_NETWORK_QOS_ENABLE" echo "OVN_IMAGE = $OVN_IMAGE" echo "KIND_NUM_MASTER = $KIND_NUM_MASTER" echo "KIND_NUM_WORKER = $KIND_NUM_WORKER" @@ -414,6 +420,7 @@ helm install ovn-kubernetes . -f "${value_file}" \ --set global.enableObservability=$(if [ "${OVN_OBSERV_ENABLE}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.emptyLbEvents=$(if [ "${OVN_EMPTY_LB_EVENTS}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.enableDNSNameResolver=$(if [ "${OVN_ENABLE_DNSNAMERESOLVER}" == "true" ]; then echo "true"; else echo "false"; fi) \ + --set global.enableNetworkQos=$(if [ "${OVN_NETWORK_QOS_ENABLE}" == "true" ]; then echo "true"; else echo "false"; fi) \ ${ovnkube_db_options} EOF ) diff --git a/contrib/kind.sh b/contrib/kind.sh index 166415a763..8c3f6eca6d 100755 --- a/contrib/kind.sh +++ b/contrib/kind.sh @@ -82,6 +82,7 @@ usage() { echo " [-ic | --enable-interconnect]" echo " [-rae | --enable-route-advertisements]" echo " [-adv | --advertise-default-network]" + echo " [-nqe | --network-qos-enable]" echo " [--isolated]" echo " [-dns | --enable-dnsnameresolver]" echo " [-obs | --observability]" @@ -141,6 +142,7 @@ usage() { echo "-sm | --scale-metrics Enable scale metrics" echo "-cm | --compact-mode Enable compact mode, ovnkube master and node run in the same process." echo "-ic | --enable-interconnect Enable interconnect with each node as a zone (only valid if OVN_HA is false)" + echo "-nqe | --network-qos-enable Enable network QoS. DEFAULT: Disabled." echo "--disable-ovnkube-identity Disable per-node cert and ovnkube-identity webhook" echo "-npz | --nodes-per-zone If interconnect is enabled, number of nodes per zone (Default 1). If this value > 1, then (total k8s nodes (workers + 1) / num of nodes per zone) should be zero." 
echo "-mtu Define the overlay mtu" @@ -346,6 +348,8 @@ parse_args() { -mtu ) shift OVN_MTU=$1 ;; + -nqe | --network-qos-enable ) OVN_NETWORK_QOS_ENABLE=true + ;; --delete ) delete exit ;; @@ -439,6 +443,7 @@ print_params() { fi fi echo "OVN_ENABLE_OVNKUBE_IDENTITY = $OVN_ENABLE_OVNKUBE_IDENTITY" + echo "OVN_NETWORK_QOS_ENABLE = $OVN_NETWORK_QOS_ENABLE" echo "KIND_NUM_WORKER = $KIND_NUM_WORKER" echo "OVN_MTU= $OVN_MTU" echo "OVN_ENABLE_DNSNAMERESOLVER= $OVN_ENABLE_DNSNAMERESOLVER" @@ -603,6 +608,7 @@ set_default_params() { KIND_NUM_MASTER=1 OVN_ENABLE_INTERCONNECT=${OVN_ENABLE_INTERCONNECT:-false} OVN_ENABLE_OVNKUBE_IDENTITY=${OVN_ENABLE_OVNKUBE_IDENTITY:-true} + OVN_NETWORK_QOS_ENABLE=${OVN_NETWORK_QOS_ENABLE:-false} if [ "$OVN_COMPACT_MODE" == true ] && [ "$OVN_ENABLE_INTERCONNECT" != false ]; then @@ -901,6 +907,7 @@ create_ovn_kube_manifests() { --enable-multi-external-gateway=true \ --enable-ovnkube-identity="${OVN_ENABLE_OVNKUBE_IDENTITY}" \ --enable-persistent-ips=true \ + --network-qos-enable="${OVN_NETWORK_QOS_ENABLE}" \ --mtu="${OVN_MTU}" \ --enable-dnsnameresolver="${OVN_ENABLE_DNSNAMERESOLVER}" \ --mtu="${OVN_MTU}" \ @@ -985,6 +992,7 @@ install_ovn() { run_kubectl apply -f k8s.ovn.org_egressqoses.yaml run_kubectl apply -f k8s.ovn.org_egressservices.yaml run_kubectl apply -f k8s.ovn.org_adminpolicybasedexternalroutes.yaml + run_kubectl apply -f k8s.ovn.org_networkqoses.yaml run_kubectl apply -f k8s.ovn.org_userdefinednetworks.yaml run_kubectl apply -f k8s.ovn.org_clusteruserdefinednetworks.yaml run_kubectl apply -f k8s.ovn.org_routeadvertisements.yaml diff --git a/dist/images/daemonset.sh b/dist/images/daemonset.sh index d6d883dff0..95e4a503e8 100755 --- a/dist/images/daemonset.sh +++ b/dist/images/daemonset.sh @@ -95,6 +95,7 @@ OVN_ENABLE_INTERCONNECT= OVN_ENABLE_OVNKUBE_IDENTITY="true" OVN_ENABLE_PERSISTENT_IPS= OVN_ENABLE_SVC_TEMPLATE_SUPPORT="true" +OVN_NETWORK_QOS_ENABLE= OVN_ENABLE_DNSNAMERESOLVER="false" OVN_NOHOSTSUBNET_LABEL="" OVN_DISABLE_REQUESTEDCHASSIS="false" @@ -362,6 +363,9 @@ while [ "$1" != "" ]; do --enable-svc-template-support) OVN_ENABLE_SVC_TEMPLATE_SUPPORT=$VALUE ;; + --network-qos-enable) + OVN_NETWORK_QOS_ENABLE=$VALUE + ;; --enable-dnsnameresolver) OVN_ENABLE_DNSNAMERESOLVER=$VALUE ;; @@ -565,6 +569,9 @@ echo "ovn_enable_persistent_ips: ${ovn_enable_persistent_ips}" ovn_enable_svc_template_support=${OVN_ENABLE_SVC_TEMPLATE_SUPPORT} echo "ovn_enable_svc_template_support: ${ovn_enable_svc_template_support}" +ovn_network_qos_enable=${OVN_NETWORK_QOS_ENABLE} +echo "ovn_network_qos_enable: ${ovn_network_qos_enable}" + ovn_enable_dnsnameresolver=${OVN_ENABLE_DNSNAMERESOLVER} echo "ovn_enable_dnsnameresolver: ${ovn_enable_dnsnameresolver}" @@ -627,6 +634,7 @@ ovn_image=${ovnkube_image} \ ovn_enable_multi_external_gateway=${ovn_enable_multi_external_gateway} \ ovn_enable_ovnkube_identity=${ovn_enable_ovnkube_identity} \ ovn_observ_enable=${ovn_observ_enable} \ + ovn_network_qos_enable=${ovn_network_qos_enable} \ ovnkube_app_name=ovnkube-node \ jinjanate ../templates/ovnkube-node.yaml.j2 -o ${output_dir}/ovnkube-node.yaml @@ -680,6 +688,7 @@ ovn_image=${ovnkube_image} \ ovn_enable_multi_external_gateway=${ovn_enable_multi_external_gateway} \ ovn_enable_ovnkube_identity=${ovn_enable_ovnkube_identity} \ ovn_observ_enable=${ovn_observ_enable} \ + ovn_network_qos_enable=${ovn_network_qos_enable} \ ovnkube_app_name=ovnkube-node-dpu \ jinjanate ../templates/ovnkube-node.yaml.j2 -o ${output_dir}/ovnkube-node-dpu.yaml @@ -722,6 +731,7 @@ ovn_image=${image} \ 
ovn_ex_gw_networking_interface=${ovn_ex_gw_networking_interface} \ ovnkube_node_mgmt_port_netdev=${ovnkube_node_mgmt_port_netdev} \ ovn_enable_ovnkube_identity=${ovn_enable_ovnkube_identity} \ + ovn_network_qos_enable=${ovn_network_qos_enable} \ ovnkube_app_name=ovnkube-node-dpu-host \ jinjanate ../templates/ovnkube-node.yaml.j2 -o ${output_dir}/ovnkube-node-dpu-host.yaml @@ -768,6 +778,7 @@ ovn_image=${ovnkube_image} \ ovn_unprivileged_mode=${ovn_unprivileged_mode} \ ovn_enable_multi_external_gateway=${ovn_enable_multi_external_gateway} \ ovn_enable_ovnkube_identity=${ovn_enable_ovnkube_identity} \ + ovn_network_qos_enable=${ovn_network_qos_enable} \ ovn_enable_persistent_ips=${ovn_enable_persistent_ips} \ ovn_enable_svc_template_support=${ovn_enable_svc_template_support} \ ovn_enable_dnsnameresolver=${ovn_enable_dnsnameresolver} \ @@ -812,6 +823,7 @@ ovn_image=${ovnkube_image} \ ovn_enable_interconnect=${ovn_enable_interconnect} \ ovn_enable_multi_external_gateway=${ovn_enable_multi_external_gateway} \ ovn_enable_ovnkube_identity=${ovn_enable_ovnkube_identity} \ + ovn_network_qos_enable=${ovn_network_qos_enable} \ ovn_v4_transit_switch_subnet=${ovn_v4_transit_switch_subnet} \ ovn_v6_transit_switch_subnet=${ovn_v6_transit_switch_subnet} \ ovn_enable_persistent_ips=${ovn_enable_persistent_ips} \ @@ -909,6 +921,7 @@ ovn_image=${ovnkube_image} \ ovn_enable_interconnect=${ovn_enable_interconnect} \ ovn_enable_multi_external_gateway=${ovn_enable_multi_external_gateway} \ ovn_enable_ovnkube_identity=${ovn_enable_ovnkube_identity} \ + ovn_network_qos_enable=${ovn_network_qos_enable} \ ovn_northd_backoff_interval=${ovn_northd_backoff_interval} \ ovn_enable_persistent_ips=${ovn_enable_persistent_ips} \ ovn_enable_svc_template_support=${ovn_enable_svc_template_support} \ @@ -974,6 +987,7 @@ ovn_image=${ovnkube_image} \ ovn_enable_interconnect=${ovn_enable_interconnect} \ ovn_enable_multi_external_gateway=${ovn_enable_multi_external_gateway} \ ovn_enable_ovnkube_identity=${ovn_enable_ovnkube_identity} \ + ovn_network_qos_enable=${ovn_network_qos_enable} \ ovn_northd_backoff_interval=${ovn_enable_backoff_interval} \ ovn_enable_persistent_ips=${ovn_enable_persistent_ips} \ ovn_enable_svc_template_support=${ovn_enable_svc_template_support} \ @@ -1060,6 +1074,7 @@ cp ../templates/k8s.ovn.org_egressips.yaml.j2 ${output_dir}/k8s.ovn.org_egressip cp ../templates/k8s.ovn.org_egressqoses.yaml.j2 ${output_dir}/k8s.ovn.org_egressqoses.yaml cp ../templates/k8s.ovn.org_egressservices.yaml.j2 ${output_dir}/k8s.ovn.org_egressservices.yaml cp ../templates/k8s.ovn.org_adminpolicybasedexternalroutes.yaml.j2 ${output_dir}/k8s.ovn.org_adminpolicybasedexternalroutes.yaml +cp ../templates/k8s.ovn.org_networkqoses.yaml.j2 ${output_dir}/k8s.ovn.org_networkqoses.yaml cp ../templates/k8s.ovn.org_userdefinednetworks.yaml.j2 ${output_dir}/k8s.ovn.org_userdefinednetworks.yaml cp ../templates/k8s.ovn.org_clusteruserdefinednetworks.yaml.j2 ${output_dir}/k8s.ovn.org_clusteruserdefinednetworks.yaml cp ../templates/k8s.ovn.org_routeadvertisements.yaml.j2 ${output_dir}/k8s.ovn.org_routeadvertisements.yaml diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index e4327397ef..a8b4399b4c 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -311,6 +311,9 @@ ovnkube_compact_mode_enable=${OVNKUBE_COMPACT_MODE_ENABLE:-false} ovn_northd_backoff_interval=${OVN_NORTHD_BACKOFF_INTERVAL:-"300"} # OVN_ENABLE_SVC_TEMPLATE_SUPPORT - enable svc template support 
ovn_enable_svc_template_support=${OVN_ENABLE_SVC_TEMPLATE_SUPPORT:-true} + +#OVN_NETWORK_QOS_ENABLE - enable network QoS for ovn-kubernetes +ovn_network_qos_enable=${OVN_NETWORK_QOS_ENABLE:-false} # OVN_ENABLE_DNSNAMERESOLVER - enable dns name resolver support ovn_enable_dnsnameresolver=${OVN_ENABLE_DNSNAMERESOLVER:-false} # OVN_OBSERV_ENABLE - enable observability for ovnkube @@ -1292,6 +1295,12 @@ ovn-master() { fi echo "ovn_disable_requestedchassis_flag=${ovn_disable_requestedchassis_flag}" + network_qos_enabled_flag= + if [[ ${ovn_network_qos_enable} == "true" ]]; then + network_qos_enabled_flag="--enable-network-qos" + fi + echo "network_qos_enabled_flag=${network_qos_enabled_flag}" + init_node_flags= if [[ ${ovnkube_compact_mode_enable} == "true" ]]; then init_node_flags="--init-node ${K8S_NODE} --nodeport" @@ -1344,6 +1353,7 @@ ovn-master() { ${ovn_v6_join_subnet_opt} \ ${ovn_v6_masquerade_subnet_opt} \ ${persistent_ips_enabled_flag} \ + ${network_qos_enabled_flag} \ ${ovn_enable_dnsnameresolver_flag} \ ${nohostsubnet_label_option} \ ${ovn_disable_requestedchassis_flag} \ @@ -1598,6 +1608,12 @@ ovnkube-controller() { fi echo "ovn_enable_svc_template_support_flag=${ovn_enable_svc_template_support_flag}" + network_qos_enabled_flag= + if [[ ${ovn_network_qos_enable} == "true" ]]; then + network_qos_enabled_flag="--enable-network-qos" + fi + echo "network_qos_enabled_flag=${network_qos_enabled_flag}" + ovn_enable_dnsnameresolver_flag= if [[ ${ovn_enable_dnsnameresolver} == "true" ]]; then ovn_enable_dnsnameresolver_flag="--enable-dns-name-resolver" @@ -1642,6 +1658,7 @@ ovnkube-controller() { ${ovn_v4_masquerade_subnet_opt} \ ${ovn_v6_join_subnet_opt} \ ${ovn_v6_masquerade_subnet_opt} \ + ${network_qos_enabled_flag} \ ${ovn_enable_dnsnameresolver_flag} \ --cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \ --gateway-mode=${ovn_gateway_mode} \ @@ -2019,6 +2036,12 @@ ovnkube-controller-with-node() { fi echo "ovn_enable_svc_template_support_flag=${ovn_enable_svc_template_support_flag}" + network_qos_enabled_flag= + if [[ ${ovn_network_qos_enable} == "true" ]]; then + network_qos_enabled_flag="--enable-network-qos" + fi + echo "network_qos_enabled_flag=${network_qos_enabled_flag}" + ovn_enable_dnsnameresolver_flag= if [[ ${ovn_enable_dnsnameresolver} == "true" ]]; then ovn_enable_dnsnameresolver_flag="--enable-dns-name-resolver" @@ -2081,6 +2104,7 @@ ovnkube-controller-with-node() { ${routable_mtu_flag} \ ${sflow_targets} \ ${ssl_opts} \ + ${network_qos_enabled_flag} \ ${ovn_enable_dnsnameresolver_flag} \ --cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \ --export-ovs-metrics \ @@ -2264,6 +2288,12 @@ ovn-cluster-manager() { fi echo "empty_lb_events_flag=${empty_lb_events_flag}" + network_qos_enabled_flag= + if [[ ${ovn_network_qos_enable} == "true" ]]; then + network_qos_enabled_flag="--enable-network-qos" + fi + echo "network_qos_enabled_flag=${network_qos_enabled_flag}" + ovn_enable_dnsnameresolver_flag= if [[ ${ovn_enable_dnsnameresolver} == "true" ]]; then ovn_enable_dnsnameresolver_flag="--enable-dns-name-resolver" @@ -2295,6 +2325,7 @@ ovn-cluster-manager() { ${ovn_v6_masquerade_subnet_opt} \ ${ovn_v4_transit_switch_subnet_opt} \ ${ovn_v6_transit_switch_subnet_opt} \ + ${network_qos_enabled_flag} \ ${ovn_enable_dnsnameresolver_flag} \ --cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \ --host-network-namespace ${ovn_host_network_namespace} \ @@ -2655,6 +2686,12 @@ ovn-node() { fi echo "ovn_conntrack_zone_flag=${ovn_conntrack_zone_flag}" + 
network_qos_enabled_flag= + if [[ ${ovn_network_qos_enable} == "true" ]]; then + network_qos_enabled_flag="--enable-network-qos" + fi + echo "network_qos_enabled_flag=${network_qos_enabled_flag}" + ovn_v4_masquerade_subnet_opt= if [[ -n ${ovn_v4_masquerade_subnet} ]]; then ovn_v4_masquerade_subnet_opt="--gateway-v4-masquerade-subnet=${ovn_v4_masquerade_subnet}" @@ -2705,6 +2742,7 @@ ovn-node() { ${ovn_unprivileged_flag} \ ${routable_mtu_flag} \ ${sflow_targets} \ + ${network_qos_enabled_flag} \ --cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \ --export-ovs-metrics \ --gateway-mode=${ovn_gateway_mode} ${ovn_gateway_opts} \ diff --git a/dist/templates/ovnkube-control-plane.yaml.j2 b/dist/templates/ovnkube-control-plane.yaml.j2 index af72a364a2..2373f38cff 100644 --- a/dist/templates/ovnkube-control-plane.yaml.j2 +++ b/dist/templates/ovnkube-control-plane.yaml.j2 @@ -181,6 +181,8 @@ spec: value: "{{ ovn_v6_transit_switch_subnet }}" - name: OVN_ENABLE_PERSISTENT_IPS value: "{{ ovn_enable_persistent_ips }}" + - name: OVN_NETWORK_QOS_ENABLE + value: "{{ ovn_network_qos_enable }}" - name: OVN_ENABLE_DNSNAMERESOLVER value: "{{ ovn_enable_dnsnameresolver }}" # end of container diff --git a/dist/templates/ovnkube-master.yaml.j2 b/dist/templates/ovnkube-master.yaml.j2 index 389a539dff..47ea81a6dc 100644 --- a/dist/templates/ovnkube-master.yaml.j2 +++ b/dist/templates/ovnkube-master.yaml.j2 @@ -308,6 +308,8 @@ spec: key: host_network_namespace - name: OVN_ENABLE_PERSISTENT_IPS value: "{{ ovn_enable_persistent_ips }}" + - name: OVN_NETWORK_QOS_ENABLE + value: "{{ ovn_network_qos_enable }}" - name: OVN_ENABLE_DNSNAMERESOLVER value: "{{ ovn_enable_dnsnameresolver }}" # end of container diff --git a/dist/templates/ovnkube-node.yaml.j2 b/dist/templates/ovnkube-node.yaml.j2 index 8fea157646..98591a5ac1 100644 --- a/dist/templates/ovnkube-node.yaml.j2 +++ b/dist/templates/ovnkube-node.yaml.j2 @@ -217,6 +217,8 @@ spec: value: "{{ ovn_ex_gw_networking_interface }}" - name: OVN_ENABLE_OVNKUBE_IDENTITY value: "{{ ovn_enable_ovnkube_identity }}" + - name: OVN_NETWORK_QOS_ENABLE + value: "{{ ovn_network_qos_enable }}" {% if ovnkube_app_name!="ovnkube-node-dpu-host" -%} - name: OVN_SSL_ENABLE value: "{{ ovn_ssl_en }}" diff --git a/dist/templates/ovnkube-single-node-zone.yaml.j2 b/dist/templates/ovnkube-single-node-zone.yaml.j2 index 3007b7c19c..d2d485cca7 100644 --- a/dist/templates/ovnkube-single-node-zone.yaml.j2 +++ b/dist/templates/ovnkube-single-node-zone.yaml.j2 @@ -460,6 +460,8 @@ spec: value: "{{ ovn_enable_ovnkube_identity }}" - name: OVN_ENABLE_SVC_TEMPLATE_SUPPORT value: "{{ ovn_enable_svc_template_support }}" + - name: OVN_NETWORK_QOS_ENABLE + value: "{{ ovn_network_qos_enable }}" - name: OVN_ENABLE_DNSNAMERESOLVER value: "{{ ovn_enable_dnsnameresolver }}" diff --git a/dist/templates/ovnkube-zone-controller.yaml.j2 b/dist/templates/ovnkube-zone-controller.yaml.j2 index d5cb2a1282..363ade3014 100644 --- a/dist/templates/ovnkube-zone-controller.yaml.j2 +++ b/dist/templates/ovnkube-zone-controller.yaml.j2 @@ -377,6 +377,8 @@ spec: value: "{{ ovn_enable_multi_external_gateway }}" - name: OVN_ENABLE_SVC_TEMPLATE_SUPPORT value: "{{ ovn_enable_svc_template_support }}" + - name: OVN_NETWORK_QOS_ENABLE + value: "{{ ovn_network_qos_enable }}" - name: OVN_HOST_NETWORK_NAMESPACE valueFrom: configMapKeyRef: diff --git a/docs/features/network-qos.md b/docs/features/network-qos.md new file mode 100644 index 0000000000..4815359333 --- /dev/null +++ b/docs/features/network-qos.md @@ -0,0 +1,20 @@ +# Network 
QoS
+
+## Introduction
+
+To enable NetworkQoS, we use the Differentiated Services Code Point (DSCP), a 6-bit field in the IP header that allows us to classify packets, effectively marking the priority of a given packet relative to other packets as "Critical", "High Priority", "Best Effort" and so on.
+
+## Problem Statement
+Workloads running in Kubernetes with OVN-Kubernetes as the networking backend may have different requirements for handling network traffic. For example, a video streaming application needs low latency and low jitter, whereas a storage application can tolerate packet loss. NetworkQoS is therefore essential for meeting such SLAs and providing better service quality.
+
+Workload traffic in a Kubernetes cluster can be east-west (pod-to-pod) or north-south (pod-to-external), and both types compete for finite bandwidth. NetworkQoS must ensure that high-priority applications receive the necessary marking so that network congestion does not degrade them.
+
+## Proposed Solution
+
+By introducing a new CRD, `NetworkQoS`, users can specify a DSCP value for packets originating from pods in a given namespace and heading to destinations selected by namespace selector, pod selector, CIDR, protocol and port. It also supports metering of those packets via the bandwidth parameters `rate` and/or `burst`.
+The CRD is namespaced, with multiple resources allowed per namespace.
+The resources are watched by ovn-k, which in turn configures OVN's [QoS Table](https://man7.org/linux/man-pages/man5/ovn-nb.5.html#NetworkQoS_TABLE).
+`NetworkQoS` also has a `status` field, populated by ovn-k, which helps users identify whether the NetworkQoS rules were configured correctly in OVN.
+
+## Sources
+- [OKEP-4380: Network QoS Support](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/master/docs/okeps/okep-4380-network-qos.md)
diff --git a/docs/installation/launching-ovn-kubernetes-on-kind.md b/docs/installation/launching-ovn-kubernetes-on-kind.md
index 586bd9f747..94d087c2e1 100644
--- a/docs/installation/launching-ovn-kubernetes-on-kind.md
+++ b/docs/installation/launching-ovn-kubernetes-on-kind.md
@@ -129,6 +129,7 @@ usage: kind.sh [[[-cf |--config-file ] [-kt|keep-taint] [-ha|--ha-enabled]
                 [-cl |--ovn-loglevel-controller ] [-me|--multicast-enabled]
                 [-ep |--experimental-provider ] |
                 [-eb |--egress-gw-separate-bridge]
+                [-nqe|--network-qos-enable]
                 [-h]]
 
 -cf | --config-file               Name of the KIND J2 configuration file.
@@ -170,6 +171,7 @@ usage: kind.sh [[[-cf |--config-file ] [-kt|keep-taint] [-ha|--ha-enabled]
 -cl | --ovn-loglevel-controller   Log config for ovn-controller DEFAULT: '-vconsole:info'.
 -ep | --experimental-provider     Use an experimental OCI provider such as podman, instead of docker. DEFAULT: Disabled.
 -eb | --egress-gw-separate-bridge The external gateway traffic uses a separate bridge.
+-nqe | --network-qos-enable       Enable network QoS. DEFAULT: Disabled.
-lr |--local-kind-registry Will start and connect a kind local registry to push/retrieve images --delete Delete current cluster --deploy Deploy ovn kubernetes without restarting kind diff --git a/docs/installation/launching-ovn-kubernetes-with-helm.md b/docs/installation/launching-ovn-kubernetes-with-helm.md index 1e658198a4..c25c107af5 100644 --- a/docs/installation/launching-ovn-kubernetes-with-helm.md +++ b/docs/installation/launching-ovn-kubernetes-with-helm.md @@ -291,6 +291,15 @@ false Configure to use multiple NetworkAttachmentDefinition CRD feature with ovn-kubernetes + + global.enableNetworkQos + string +
+""
+
+ + Enables network QoS support from/to pods + global.enableMulticast string diff --git a/go-controller/pkg/config/config.go b/go-controller/pkg/config/config.go index f2bc55925e..e7c8a2f849 100644 --- a/go-controller/pkg/config/config.go +++ b/go-controller/pkg/config/config.go @@ -1141,6 +1141,12 @@ var OVNK8sFeatureFlags = []cli.Flag{ Destination: &cliConfig.OVNKubernetesFeature.EnableObservability, Value: OVNKubernetesFeature.EnableObservability, }, + &cli.BoolFlag{ + Name: "enable-network-qos", + Usage: "Configure to use NetworkQoS CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableNetworkQoS, + Value: OVNKubernetesFeature.EnableNetworkQoS, + }, } // K8sFlags capture Kubernetes-related options diff --git a/helm/ovn-kubernetes/README.md b/helm/ovn-kubernetes/README.md index cc78130274..cdf89a04b5 100644 --- a/helm/ovn-kubernetes/README.md +++ b/helm/ovn-kubernetes/README.md @@ -342,6 +342,15 @@ false Configure to use user defined networks (UDN) feature with ovn-kubernetes + + global.enableNetworkQos + string +
+""
+
+ + Enables network QoS support from/to pods + global.enableMulticast string diff --git a/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/ovnkube-control-plane.yaml b/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/ovnkube-control-plane.yaml index ddab6b479e..2b6edcaa8e 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/ovnkube-control-plane.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/ovnkube-control-plane.yaml @@ -153,6 +153,8 @@ spec: value: {{ hasKey .Values.global "enableInterconnect" | ternary .Values.global.enableInterconnect false | quote }} - name: OVN_ENABLE_MULTI_EXTERNAL_GATEWAY value: {{ hasKey .Values.global "enableMultiExternalGateway" | ternary .Values.global.enableMultiExternalGateway false | quote }} + - name: OVN_NETWORK_QOS_ENABLE + value: {{ hasKey .Values.global "enableNetworkQos" | ternary .Values.global.enableNetworkQos false | quote }} - name: OVN_V4_TRANSIT_SWITCH_SUBNET value: {{ default "" .Values.global.v4TransitSwitchSubnet | quote }} - name: OVN_V6_TRANSIT_SWITCH_SUBNET diff --git a/helm/ovn-kubernetes/charts/ovnkube-master/templates/deployment-ovnkube-master.yaml b/helm/ovn-kubernetes/charts/ovnkube-master/templates/deployment-ovnkube-master.yaml index d06ee79a68..5e8a48a47a 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-master/templates/deployment-ovnkube-master.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-master/templates/deployment-ovnkube-master.yaml @@ -274,6 +274,8 @@ spec: value: {{ hasKey .Values.global "enableSvcTemplate" | ternary .Values.global.enableSvcTemplate true | quote }} - name: OVN_NOHOSTSUBNET_LABEL value: {{ default "k8s.ovn.org/ovn-managed=false" .Values.global.noHostSubnetLabel | quote }} + - name: OVN_NETWORK_QOS_ENABLE + value: {{ hasKey .Values.global "enableNetworkQos" | ternary .Values.global.enableNetworkQos false | quote }} - name: OVN_HOST_NETWORK_NAMESPACE valueFrom: configMapKeyRef: diff --git a/helm/ovn-kubernetes/charts/ovnkube-node-dpu-host/templates/ovnkube-node-dpu-host.yaml b/helm/ovn-kubernetes/charts/ovnkube-node-dpu-host/templates/ovnkube-node-dpu-host.yaml index 123d5c3e01..6d7e840d01 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-node-dpu-host/templates/ovnkube-node-dpu-host.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-node-dpu-host/templates/ovnkube-node-dpu-host.yaml @@ -198,6 +198,8 @@ spec: value: {{ default "" .Values.global.extGatewayNetworkInterface | quote }} - name: OVN_ENABLE_OVNKUBE_IDENTITY value: {{ hasKey .Values.global "enableOvnKubeIdentity" | ternary .Values.global.enableOvnKubeIdentity true | quote }} + - name: OVN_NETWORK_QOS_ENABLE + value: {{ hasKey .Values.global "enableNetworkQos" | ternary .Values.global.enableNetworkQos false | quote }} - name: OVNKUBE_NODE_MODE value: "dpu-host" - name: OVNKUBE_NODE_MGMT_PORT_NETDEV diff --git a/helm/ovn-kubernetes/charts/ovnkube-node-dpu/templates/ovnkube-node-dpu.yaml b/helm/ovn-kubernetes/charts/ovnkube-node-dpu/templates/ovnkube-node-dpu.yaml index 26be761fe7..9544653418 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-node-dpu/templates/ovnkube-node-dpu.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-node-dpu/templates/ovnkube-node-dpu.yaml @@ -231,6 +231,8 @@ spec: value: {{ hasKey .Values.global "enableInterconnect" | ternary .Values.global.enableInterconnect false | quote }} - name: OVN_ENABLE_MULTI_EXTERNAL_GATEWAY value: {{ hasKey .Values.global "enableMultiExternalGateway" | ternary .Values.global.enableMultiExternalGateway false | quote }} + - name: OVN_NETWORK_QOS_ENABLE + 
value: {{ hasKey .Values.global "enableNetworkQos" | ternary .Values.global.enableNetworkQos false | quote }} - name: OVNKUBE_NODE_MODE value: "dpu" - name: OVN_HOST_NETWORK_NAMESPACE diff --git a/helm/ovn-kubernetes/charts/ovnkube-node/templates/ovnkube-node.yaml b/helm/ovn-kubernetes/charts/ovnkube-node/templates/ovnkube-node.yaml index 7e58a260b1..e4b0a0621a 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-node/templates/ovnkube-node.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-node/templates/ovnkube-node.yaml @@ -235,6 +235,8 @@ spec: value: {{ hasKey .Values.global "enableMultiExternalGateway" | ternary .Values.global.enableMultiExternalGateway false | quote }} - name: OVNKUBE_NODE_MGMT_PORT_NETDEV value: {{ default "" .Values.global.nodeMgmtPortNetdev | quote }} + - name: OVN_NETWORK_QOS_ENABLE + value: {{ hasKey .Values.global "enableNetworkQos" | ternary .Values.global.enableNetworkQos false | quote }} - name: OVN_HOST_NETWORK_NAMESPACE valueFrom: configMapKeyRef: diff --git a/helm/ovn-kubernetes/charts/ovnkube-single-node-zone/templates/ovnkube-single-node-zone.yaml b/helm/ovn-kubernetes/charts/ovnkube-single-node-zone/templates/ovnkube-single-node-zone.yaml index f74726096f..d60276308b 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-single-node-zone/templates/ovnkube-single-node-zone.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-single-node-zone/templates/ovnkube-single-node-zone.yaml @@ -441,6 +441,8 @@ spec: value: {{ hasKey .Values.global "enableDNSNameResolver" | ternary .Values.global.enableDNSNameResolver false | quote }} - name: OVN_OBSERV_ENABLE value: {{ hasKey .Values.global "enableObservability" | ternary .Values.global.enableObservability false | quote }} + - name: OVN_NETWORK_QOS_ENABLE + value: {{ hasKey .Values.global "enableNetworkQos" | ternary .Values.global.enableNetworkQos false | quote }} readinessProbe: exec: command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnkube-node"] diff --git a/helm/ovn-kubernetes/charts/ovnkube-zone-controller/templates/ovnkube-zone-controller.yaml b/helm/ovn-kubernetes/charts/ovnkube-zone-controller/templates/ovnkube-zone-controller.yaml index 30332a08d6..f692ed0524 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-zone-controller/templates/ovnkube-zone-controller.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-zone-controller/templates/ovnkube-zone-controller.yaml @@ -343,6 +343,8 @@ spec: value: {{ hasKey .Values.global "enableMultiExternalGateway" | ternary .Values.global.enableMultiExternalGateway false | quote }} - name: OVN_ENABLE_SVC_TEMPLATE_SUPPORT value: {{ hasKey .Values.global "enableSvcTemplate" | ternary .Values.global.enableSvcTemplate true | quote }} + - name: OVN_NETWORK_QOS_ENABLE + value: {{ hasKey .Values.global "enableNetworkQos" | ternary .Values.global.enableNetworkQos false | quote }} - name: OVN_HOST_NETWORK_NAMESPACE valueFrom: configMapKeyRef: diff --git a/helm/ovn-kubernetes/values-multi-node-zone.yaml b/helm/ovn-kubernetes/values-multi-node-zone.yaml index 716b4ebbea..2eef44ecae 100644 --- a/helm/ovn-kubernetes/values-multi-node-zone.yaml +++ b/helm/ovn-kubernetes/values-multi-node-zone.yaml @@ -68,6 +68,8 @@ global: enableEgressFirewall: true # -- Configure to use EgressQoS CRD feature with ovn-kubernetes enableEgressQos: true + # -- Enables network QoS support from/to pods + enableNetworkQos: false # -- Enables multicast support between the pods within the same namespace enableMulticast: "" # -- Configure to use multiple NetworkAttachmentDefinition CRD feature with ovn-kubernetes diff --git 
a/helm/ovn-kubernetes/values-no-ic.yaml b/helm/ovn-kubernetes/values-no-ic.yaml
index bf2ad903c4..f643f81133 100644
--- a/helm/ovn-kubernetes/values-no-ic.yaml
+++ b/helm/ovn-kubernetes/values-no-ic.yaml
@@ -62,6 +62,8 @@ global:
   enableEgressFirewall: true
   # -- Configure to use EgressQoS CRD feature with ovn-kubernetes
   enableEgressQos: true
+  # -- Enables network QoS support from/to pods
+  enableNetworkQos: false
   # -- Enables multicast support between the pods within the same namespace
   enableMulticast: ""
   # -- Configure to use multiple NetworkAttachmentDefinition CRD feature with ovn-kubernetes
diff --git a/helm/ovn-kubernetes/values-single-node-zone.yaml b/helm/ovn-kubernetes/values-single-node-zone.yaml
index d5802f1f37..9747d45440 100644
--- a/helm/ovn-kubernetes/values-single-node-zone.yaml
+++ b/helm/ovn-kubernetes/values-single-node-zone.yaml
@@ -68,6 +68,8 @@ global:
   enableEgressFirewall: true
   # -- Configure to use EgressQoS CRD feature with ovn-kubernetes
   enableEgressQos: true
+  # -- Enables network QoS support from/to pods
+  enableNetworkQos: false
   # -- Enables multicast support between the pods within the same namespace
   enableMulticast: ""
   # -- Configure to use multiple NetworkAttachmentDefinition CRD feature with ovn-kubernetes
diff --git a/mkdocs.yml b/mkdocs.yml
index ef081b915d..c34db68f52 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -121,6 +121,7 @@ nav:
       - MultiNetworkPolicies: features/multiple-networks/multi-network-policies.md
       - MultiNetworkRails: features/multiple-networks/multi-vtep.md
       - Multicast: features/multicast.md
+      - NetworkQoS: features/network-qos.md
       - LiveMigration: features/live-migration.md
       - HybridOverlay: features/hybrid-overlay.md
       - Hardware Acceleration:

From 92e0ef2ab01e979bc522fbf353b3a0aa21f182b7 Mon Sep 17 00:00:00 2001
From: Flavio Fernandes
Date: Tue, 16 Jul 2024 00:42:49 +0000
Subject: [PATCH 05/18] Add factory, handlers, clients for Network QoS

This commit adds the preparatory bits used by the network qos
controller in the following commit. It also grants the system account
sufficient permissions to list/watch/patch the new CRD.
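For orientation, the consumer side of this plumbing looks roughly like the
sketch below: the generated clientset is what the new NetworkQoSClient fields
carry, and a plain list exercises exactly the get/list/watch verbs granted by
the RBAC rules in this patch. This is a minimal sketch, assuming in-cluster
credentials; the main() harness and the "default" namespace are illustrative,
not part of this series:

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/rest"

        networkqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned"
    )

    func main() {
        // Credentials come from the pod's service account, which is what
        // the ClusterRole additions grant list/watch/patch for.
        kconfig, err := rest.InClusterConfig()
        if err != nil {
            panic(err)
        }
        // Typed client for the new CRD; NewOVNClientset wires the same
        // thing into the NetworkQoSClient field.
        client, err := networkqosclientset.NewForConfig(kconfig)
        if err != nil {
            panic(err)
        }
        // A plain list, i.e. the "get/list/watch" verbs from the ClusterRole.
        nqoses, err := client.K8sV1alpha1().NetworkQoSes("default").List(context.TODO(), metav1.ListOptions{})
        if err != nil {
            panic(err)
        }
        for _, nqos := range nqoses.Items {
            fmt.Printf("NetworkQoS %s/%s status: %q\n", nqos.Namespace, nqos.Name, nqos.Status.Status)
        }
    }
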
Signed-off-by: Flavio Fernandes (cherry picked from commit 11c7f0d2721e38f49ac120e41b6a6a09595e4591) --- .../rbac-ovnkube-cluster-manager.yaml.j2 | 2 + dist/templates/rbac-ovnkube-master.yaml.j2 | 3 + dist/templates/rbac-ovnkube-node.yaml.j2 | 2 + go-controller/pkg/factory/factory.go | 48 ++++++++ go-controller/pkg/factory/factory_test.go | 115 ++++++++++++++++++ go-controller/pkg/factory/handler.go | 3 + go-controller/pkg/kube/kube.go | 2 + .../node/managementport/port_linux_test.go | 5 +- go-controller/pkg/util/kube.go | 16 +++ .../rbac-ovnkube-cluster-manager.yaml | 2 + .../templates/rbac-ovnkube-master.yaml | 4 + .../templates/rbac-ovnkube-node.yaml | 2 + 12 files changed, 202 insertions(+), 2 deletions(-) diff --git a/dist/templates/rbac-ovnkube-cluster-manager.yaml.j2 b/dist/templates/rbac-ovnkube-cluster-manager.yaml.j2 index 3b347dd91c..44f7020165 100644 --- a/dist/templates/rbac-ovnkube-cluster-manager.yaml.j2 +++ b/dist/templates/rbac-ovnkube-cluster-manager.yaml.j2 @@ -76,6 +76,7 @@ rules: - userdefinednetworks - clusteruserdefinednetworks - routeadvertisements + - networkqoses verbs: [ "get", "list", "watch" ] - apiGroups: ["k8s.ovn.org"] resources: @@ -103,6 +104,7 @@ rules: - adminpolicybasedexternalroutes/status - egressfirewalls/status - egressqoses/status + - networkqoses/status verbs: [ "patch", "update" ] - apiGroups: ["policy.networking.k8s.io"] resources: diff --git a/dist/templates/rbac-ovnkube-master.yaml.j2 b/dist/templates/rbac-ovnkube-master.yaml.j2 index ab4c98fd89..c99d655f91 100644 --- a/dist/templates/rbac-ovnkube-master.yaml.j2 +++ b/dist/templates/rbac-ovnkube-master.yaml.j2 @@ -85,6 +85,7 @@ rules: - adminpolicybasedexternalroutes - userdefinednetworks - clusteruserdefinednetworks + - networkqoses verbs: [ "get", "list", "watch" ] - apiGroups: ["k8s.cni.cncf.io"] resources: @@ -119,6 +120,8 @@ rules: - clusteruserdefinednetworks - clusteruserdefinednetworks/status - clusteruserdefinednetworks/finalizers + - networkqoses + - networkqoses/status verbs: [ "patch", "update" ] - apiGroups: [""] resources: diff --git a/dist/templates/rbac-ovnkube-node.yaml.j2 b/dist/templates/rbac-ovnkube-node.yaml.j2 index 1e9e413c27..b0edb15f17 100644 --- a/dist/templates/rbac-ovnkube-node.yaml.j2 +++ b/dist/templates/rbac-ovnkube-node.yaml.j2 @@ -163,6 +163,7 @@ rules: - adminpolicybasedexternalroutes/status - egressqoses/status - routeadvertisements/status + - networkqoses/status verbs: [ "patch", "update" ] - apiGroups: ["policy.networking.k8s.io"] resources: @@ -185,6 +186,7 @@ rules: - userdefinednetworks - clusteruserdefinednetworks - routeadvertisements + - networkqoses verbs: [ "get", "list", "watch" ] {% if ovn_enable_ovnkube_identity == "true" -%} - apiGroups: ["certificates.k8s.io"] diff --git a/go-controller/pkg/factory/factory.go b/go-controller/pkg/factory/factory.go index f5d6b539de..b3ab80f8d0 100644 --- a/go-controller/pkg/factory/factory.go +++ b/go-controller/pkg/factory/factory.go @@ -81,6 +81,11 @@ import ( egressservicescheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/scheme" egressserviceinformerfactory "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions" egressserviceinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/egressservice/v1" + networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1" + networkqosscheme 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/scheme" + networkqosinformerfactory "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions" + networkqosinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1" + networkqoslister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1" routeadvertisementsapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" routeadvertisementsscheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/scheme" routeadvertisementsinformerfactory "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions" @@ -118,6 +123,7 @@ type WatchFactory struct { udnFactory userdefinednetworkapiinformerfactory.SharedInformerFactory raFactory routeadvertisementsinformerfactory.SharedInformerFactory frrFactory frrinformerfactory.SharedInformerFactory + networkQoSFactory networkqosinformerfactory.SharedInformerFactory informers map[reflect.Type]*informer stopChan chan struct{} @@ -239,6 +245,7 @@ var ( IPAMClaimsType reflect.Type = reflect.TypeOf(&ipamclaimsapi.IPAMClaim{}) UserDefinedNetworkType reflect.Type = reflect.TypeOf(&userdefinednetworkapi.UserDefinedNetwork{}) ClusterUserDefinedNetworkType reflect.Type = reflect.TypeOf(&userdefinednetworkapi.ClusterUserDefinedNetwork{}) + NetworkQoSType reflect.Type = reflect.TypeOf(&networkqosapi.NetworkQoS{}) // Resource types used in ovnk node NamespaceExGwType reflect.Type = reflect.TypeOf(&namespaceExGw{}) @@ -306,6 +313,7 @@ func NewOVNKubeControllerWatchFactory(ovnClientset *util.OVNKubeControllerClient mnpFactory: mnpinformerfactory.NewSharedInformerFactory(ovnClientset.MultiNetworkPolicyClient, resyncInterval), egressServiceFactory: egressserviceinformerfactory.NewSharedInformerFactory(ovnClientset.EgressServiceClient, resyncInterval), apbRouteFactory: adminbasedpolicyinformerfactory.NewSharedInformerFactory(ovnClientset.AdminPolicyRouteClient, resyncInterval), + networkQoSFactory: networkqosinformerfactory.NewSharedInformerFactory(ovnClientset.NetworkQoSClient, resyncInterval), informers: make(map[reflect.Type]*informer), stopChan: make(chan struct{}), } @@ -351,6 +359,10 @@ func NewOVNKubeControllerWatchFactory(ovnClientset *util.OVNKubeControllerClient return nil, err } + if err := networkqosapi.AddToScheme(networkqosscheme.Scheme); err != nil { + return nil, err + } + // For Services and Endpoints, pre-populate the shared Informer with one that // has a label selector excluding headless services. 
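// (InformerFor registers a custom constructor with the shared factory: any
// later request for a Service informer through the same factory reuses this
// filtered, pre-populated instance instead of opening a second, unfiltered
// watch against the API server.)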
wf.iFactory.InformerFor(&corev1.Service{}, func(c kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { @@ -501,6 +513,13 @@ func NewOVNKubeControllerWatchFactory(ovnClientset *util.OVNKubeControllerClient wf.raFactory.K8s().V1().RouteAdvertisements().Informer() } + if config.OVNKubernetesFeature.EnableNetworkQoS { + wf.informers[NetworkQoSType], err = newInformer(NetworkQoSType, wf.networkQoSFactory.K8s().V1().NetworkQoSes().Informer()) + if err != nil { + return nil, err + } + } + return wf, nil } @@ -635,6 +654,15 @@ func (wf *WatchFactory) Start() error { } } + if config.OVNKubernetesFeature.EnableNetworkQoS && wf.networkQoSFactory != nil { + wf.networkQoSFactory.Start(wf.stopChan) + for oType, synced := range waitForCacheSyncWithTimeout(wf.networkQoSFactory, wf.stopChan) { + if !synced { + return fmt.Errorf("error in syncing cache for %v informer", oType) + } + } + } + return nil } @@ -680,9 +708,14 @@ func (wf *WatchFactory) Stop() { if wf.raFactory != nil { wf.raFactory.Shutdown() } + if wf.frrFactory != nil { wf.frrFactory.Shutdown() } + + if wf.networkQoSFactory != nil { + wf.networkQoSFactory.Shutdown() + } } // NewNodeWatchFactory initializes a watch factory with significantly fewer @@ -869,6 +902,7 @@ func NewClusterManagerWatchFactory(ovnClientset *util.OVNClusterManagerClientset dnsFactory: ocpnetworkinformerfactory.NewSharedInformerFactoryWithOptions(ovnClientset.OCPNetworkClient, resyncInterval, ocpnetworkinformerfactory.WithNamespace(config.Kubernetes.OVNConfigNamespace)), apbRouteFactory: adminbasedpolicyinformerfactory.NewSharedInformerFactory(ovnClientset.AdminPolicyRouteClient, resyncInterval), egressQoSFactory: egressqosinformerfactory.NewSharedInformerFactory(ovnClientset.EgressQoSClient, resyncInterval), + networkQoSFactory: networkqosinformerfactory.NewSharedInformerFactory(ovnClientset.NetworkQoSClient, resyncInterval), informers: make(map[reflect.Type]*informer), stopChan: make(chan struct{}), } @@ -1413,6 +1447,11 @@ func (wf *WatchFactory) RemoveBaselineAdminNetworkPolicyHandler(handler *Handler wf.removeHandler(BaselineAdminNetworkPolicyType, handler) } +// RemoveNetworkQoSHandler removes an NetworkQoS object event handler function +func (wf *WatchFactory) RemoveNetworkQoSHandler(handler *Handler) { + wf.removeHandler(NetworkQoSType, handler) +} + // AddNetworkAttachmentDefinitionHandler adds a handler function that will be executed on NetworkAttachmentDefinition object changes func (wf *WatchFactory) AddNetworkAttachmentDefinitionHandler(handlerFuncs cache.ResourceEventHandler, processExisting func([]interface{}) error) (*Handler, error) { return wf.addHandler(NetworkAttachmentDefinitionType, "", nil, handlerFuncs, processExisting, defaultHandlerPriority) @@ -1636,6 +1675,11 @@ func (wf *WatchFactory) GetEgressFirewall(namespace, name string) (*egressfirewa return egressFirewallLister.EgressFirewalls(namespace).Get(name) } +func (wf *WatchFactory) GetNetworkQoSes() ([]*networkqosapi.NetworkQoS, error) { + networkQosLister := wf.informers[NetworkQoSType].lister.(networkqoslister.NetworkQoSLister) + return networkQosLister.List(labels.Everything()) +} + func (wf *WatchFactory) CertificateSigningRequestInformer() certificatesinformers.CertificateSigningRequestInformer { return wf.iFactory.Certificates().V1().CertificateSigningRequests() } @@ -1760,6 +1804,10 @@ func (wf *WatchFactory) FRRConfigurationsInformer() frrinformer.FRRConfiguration return wf.frrFactory.Api().V1beta1().FRRConfigurations() } +func (wf *WatchFactory) 
NetworkQoSInformer() networkqosinformer.NetworkQoSInformer { + return wf.networkQoSFactory.K8s().V1().NetworkQoSes() +} + // withServiceNameAndNoHeadlessServiceSelector returns a LabelSelector (added to the // watcher for EndpointSlices) that will only choose EndpointSlices with a non-empty // "kubernetes.io/service-name" label and without "service.kubernetes.io/headless" diff --git a/go-controller/pkg/factory/factory_test.go b/go-controller/pkg/factory/factory_test.go index 7ca9951356..ad8c9472e1 100644 --- a/go-controller/pkg/factory/factory_test.go +++ b/go-controller/pkg/factory/factory_test.go @@ -40,6 +40,9 @@ import ( egressservicefake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + networkqos "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1" + networkqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -223,6 +226,34 @@ func newIPAMClaim(name string) *ipamclaimsapi.IPAMClaim { } } +func newNetworkQoS(name, namespace string) *networkqos.NetworkQoS { + return &networkqos.NetworkQoS{ + ObjectMeta: newObjectMeta(name, namespace), + Spec: networkqos.Spec{ + NetworkAttachmentName: "default/stream", + Egress: []networkqos.Rule{ + { + Priority: 100, + DSCP: 50, + Classifier: networkqos.Classifier{ + To: []networkqos.Destination{ + { + IPBlock: &knet.IPBlock{ + CIDR: "1.2.3.4/32", + }, + }, + }, + }, + Bandwidth: networkqos.Bandwidth{ + Rate: 20000, + Burst: 10, + }, + }, + }, + }, + } +} + func objSetup(c *fake.Clientset, objType string, listFn func(core.Action) (bool, runtime.Object, error)) *watch.FakeWatcher { w := watch.NewFake() c.AddWatchReactor(objType, core.DefaultWatchReactor(w, nil)) @@ -279,6 +310,13 @@ func ipamClaimsObjSetup(c *ipamclaimsapifake.Clientset, objType string, listFn f return w } +func networkQoSObjSetup(c *networkqosfake.Clientset, objType string, listFn func(core.Action) (bool, runtime.Object, error)) *watch.FakeWatcher { + w := watch.NewFake() + c.AddWatchReactor(objType, core.DefaultWatchReactor(w, nil)) + c.AddReactor("list", objType, listFn) + return w +} + type handlerCalls struct { added int32 updated int32 @@ -310,6 +348,7 @@ var _ = Describe("Watch Factory Operations", func() { adminNetworkPolicyFakeClient *anpapifake.Clientset ipamClaimsFakeClient *ipamclaimsapifake.Clientset nadsFakeClient *nadsfake.Clientset + networkQoSFakeClient *networkqosfake.Clientset podWatch, namespaceWatch, nodeWatch *watch.FakeWatcher policyWatch, serviceWatch *watch.FakeWatcher endpointSliceWatch *watch.FakeWatcher @@ -321,6 +360,7 @@ var _ = Describe("Watch Factory Operations", func() { adminNetPolWatch *watch.FakeWatcher baselineAdminNetPolWatch *watch.FakeWatcher ipamClaimsWatch *watch.FakeWatcher + networkQoSWatch *watch.FakeWatcher pods []*corev1.Pod namespaces []*corev1.Namespace nodes []*corev1.Node @@ -336,6 +376,7 @@ var _ = Describe("Watch Factory Operations", func() { adminNetworkPolicies []*anpapi.AdminNetworkPolicy baselineAdminNetworkPolicies []*anpapi.BaselineAdminNetworkPolicy ipamClaims []*ipamclaimsapi.IPAMClaim + networkQoSes []*networkqos.NetworkQoS err error shutdown bool ) @@ -355,6 +396,7 @@ var _ = Describe("Watch Factory Operations", func() { config.OVNKubernetesFeature.EnableAdminNetworkPolicy = true 
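// The feature gates flipped on in this setup decide which informers the
// watch factory creates; EnableNetworkQoS is enabled just below so the
// NetworkQoS informer and its handler specs are exercised, while a separate
// "when NetworkQoS is disabled" context later asserts the informer is
// skipped entirely.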
config.OVNKubernetesFeature.EnableMultiNetwork = true config.OVNKubernetesFeature.EnablePersistentIPs = true + config.OVNKubernetesFeature.EnableNetworkQoS = true config.Kubernetes.PlatformType = string(ocpconfigapi.AWSPlatformType) fakeClient = &fake.Clientset{} @@ -366,6 +408,7 @@ var _ = Describe("Watch Factory Operations", func() { adminNetworkPolicyFakeClient = &anpapifake.Clientset{} ipamClaimsFakeClient = &ipamclaimsapifake.Clientset{} nadsFakeClient = &nadsfake.Clientset{} + networkQoSFakeClient = &networkqosfake.Clientset{} ovnClientset = &util.OVNMasterClientset{ KubeClient: fakeClient, @@ -377,6 +420,7 @@ var _ = Describe("Watch Factory Operations", func() { EgressServiceClient: egressServiceFakeClient, IPAMClaimsClient: ipamClaimsFakeClient, NetworkAttchDefClient: nadsFakeClient, + NetworkQoSClient: networkQoSFakeClient, } ovnCMClientset = &util.OVNClusterManagerClientset{ KubeClient: fakeClient, @@ -513,6 +557,16 @@ var _ = Describe("Watch Factory Operations", func() { } return true, obj, nil }) + + networkQoSes = make([]*networkqos.NetworkQoS, 0) + networkQoSWatch = networkQoSObjSetup(networkQoSFakeClient, "networkqoses", func(core.Action) (bool, runtime.Object, error) { + obj := &networkqos.NetworkQoSList{} + for _, p := range networkQoSes { + obj.Items = append(obj.Items, *p) + } + return true, obj, nil + }) + shutdown = false }) @@ -673,6 +727,10 @@ var _ = Describe("Watch Factory Operations", func() { ipamClaims = append(ipamClaims, newIPAMClaim("claim!")) testExisting(IPAMClaimsType, "", nil, defaultHandlerPriority) }) + It("is called for each existing networkQoS", func() { + networkQoSes = append(networkQoSes, newNetworkQoS("myNetworkQoS", "default")) + testExisting(NetworkQoSType, "", nil, defaultHandlerPriority) + }) It("is called for each existing pod that matches a given namespace and label", func() { pod := newPod("pod1", "default") @@ -787,6 +845,12 @@ var _ = Describe("Watch Factory Operations", func() { baselineAdminNetworkPolicies = append(baselineAdminNetworkPolicies, newBaselineAdminNetworkPolicy("myBANP2")) testExisting(BaselineAdminNetworkPolicyType) }) + It("calls ADD for each existing networkQoS", func() { + networkQoSes = append(networkQoSes, newNetworkQoS("myNetworkQoS", "default")) + networkQoSes = append(networkQoSes, newNetworkQoS("myNetworkQoS1", "default")) + testExisting(NetworkQoSType) + }) + It("doesn't deadlock when factory is shutdown", func() { // every queue has length 10, but some events may be handled before the stop channel event is selected, // so multiply by 15 instead of 10 to ensure overflow @@ -894,6 +958,20 @@ var _ = Describe("Watch Factory Operations", func() { }) }) + Context("when NetworkQoS is disabled", func() { + testExisting := func(objType reflect.Type) { + wf, err = NewMasterWatchFactory(ovnClientset) + Expect(err).NotTo(HaveOccurred()) + err = wf.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(wf.informers).NotTo(HaveKey(objType)) + } + It("does not contain NetworkQoS informer", func() { + config.OVNKubernetesFeature.EnableNetworkQoS = false + testExisting(NetworkQoSType) + }) + }) + addFilteredHandler := func(wf *WatchFactory, objType reflect.Type, realObjType reflect.Type, namespace string, sel labels.Selector, funcs cache.ResourceEventHandlerFuncs) (*Handler, *handlerCalls) { calls := handlerCalls{} h, err := wf.addHandler(objType, namespace, sel, cache.ResourceEventHandlerFuncs{ @@ -2062,6 +2140,43 @@ var _ = Describe("Watch Factory Operations", func() { wf.RemoveIPAMClaimsHandler(h) }) + + It("responds to 
networkQoS add/update/delete events", func() { + wf, err = NewMasterWatchFactory(ovnClientset) + Expect(err).NotTo(HaveOccurred()) + err = wf.Start() + Expect(err).NotTo(HaveOccurred()) + + added := newNetworkQoS("myNetworkQoS", "default") + h, c := addHandler(wf, NetworkQoSType, cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + networkQoS := obj.(*networkqos.NetworkQoS) + Expect(reflect.DeepEqual(networkQoS, added)).To(BeTrue()) + }, + UpdateFunc: func(old, new interface{}) { + newNetworkQoS := new.(*networkqos.NetworkQoS) + Expect(reflect.DeepEqual(newNetworkQoS, added)).To(BeTrue()) + Expect(newNetworkQoS.Spec.Egress[0].DSCP).To(Equal(42)) + }, + DeleteFunc: func(obj interface{}) { + networkQoS := obj.(*networkqos.NetworkQoS) + Expect(reflect.DeepEqual(networkQoS, added)).To(BeTrue()) + }, + }) + + networkQoSes = append(networkQoSes, added) + networkQoSWatch.Add(added) + Eventually(c.getAdded, 2).Should(Equal(1)) + added.Spec.Egress[0].DSCP = 42 + networkQoSWatch.Modify(added) + Eventually(c.getUpdated, 2).Should(Equal(1)) + networkQoSes = networkQoSes[:0] + networkQoSWatch.Delete(added) + Eventually(c.getDeleted, 2).Should(Equal(1)) + + wf.RemoveNetworkQoSHandler(h) + }) + It("stops processing events after the handler is removed", func() { wf, err = NewMasterWatchFactory(ovnClientset) Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/factory/handler.go b/go-controller/pkg/factory/handler.go index 25c05470e4..dac86905d5 100644 --- a/go-controller/pkg/factory/handler.go +++ b/go-controller/pkg/factory/handler.go @@ -26,6 +26,7 @@ import ( egressiplister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/listers/egressip/v1" egressqoslister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1" egressservicelister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/listers/egressservice/v1" + networkqoslister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1" userdefinednetworklister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" ) @@ -504,6 +505,8 @@ func newInformerLister(oType reflect.Type, sharedInformer cache.SharedIndexInfor return userdefinednetworklister.NewUserDefinedNetworkLister(sharedInformer.GetIndexer()), nil case ClusterUserDefinedNetworkType: return userdefinednetworklister.NewClusterUserDefinedNetworkLister(sharedInformer.GetIndexer()), nil + case NetworkQoSType: + return networkqoslister.NewNetworkQoSLister(sharedInformer.GetIndexer()), nil } return nil, fmt.Errorf("cannot create lister from type %v", oType) diff --git a/go-controller/pkg/kube/kube.go b/go-controller/pkg/kube/kube.go index 81cb5a2d30..4171e398e2 100644 --- a/go-controller/pkg/kube/kube.go +++ b/go-controller/pkg/kube/kube.go @@ -29,6 +29,7 @@ import ( egressipclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned" egressqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned" egressserviceclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned" + networkqosclientset 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" ) // InterfaceOVN represents the exported methods for dealing with getting/setting @@ -89,6 +90,7 @@ type KubeOVN struct { EgressQoSClient egressqosclientset.Interface IPAMClaimsClient ipamclaimssclientset.Interface NADClient nadclientset.Interface + NetworkQoSClient networkqosclientset.Interface } // SetAnnotationsOnPod takes the pod object and map of key/value string pairs to set as annotations diff --git a/go-controller/pkg/node/managementport/port_linux_test.go b/go-controller/pkg/node/managementport/port_linux_test.go index 30bc33b78b..d6d99d7577 100644 --- a/go-controller/pkg/node/managementport/port_linux_test.go +++ b/go-controller/pkg/node/managementport/port_linux_test.go @@ -29,6 +29,7 @@ import ( egressfirewallfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake" egressipv1fake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake" egressservicefake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake" + networkqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" @@ -273,7 +274,7 @@ func testManagementPort(ctx *cli.Context, fexec *ovntest.FakeExec, testNS ns.Net Expect(err).NotTo(HaveOccurred()) kubeInterface := &kube.KubeOVN{Kube: kube.Kube{KClient: fakeClient}, ANPClient: anpfake.NewSimpleClientset(), EIPClient: egressipv1fake.NewSimpleClientset(), EgressFirewallClient: &egressfirewallfake.Clientset{}, - EgressServiceClient: &egressservicefake.Clientset{}} + EgressServiceClient: &egressservicefake.Clientset{}, NetworkQoSClient: &networkqosfake.Clientset{}} nodeAnnotator := kube.NewNodeAnnotator(kubeInterface, existingNode.Name) watchFactory, err := factory.NewNodeWatchFactory(fakeNodeClient, nodeName) Expect(err).NotTo(HaveOccurred()) @@ -374,7 +375,7 @@ func testManagementPortDPU(ctx *cli.Context, fexec *ovntest.FakeExec, testNS ns. 
_, err = config.InitConfig(ctx, fexec, nil) Expect(err).NotTo(HaveOccurred()) - kubeInterface := &kube.KubeOVN{Kube: kube.Kube{KClient: fakeClient}, ANPClient: anpfake.NewSimpleClientset(), EIPClient: egressipv1fake.NewSimpleClientset(), EgressFirewallClient: &egressfirewallfake.Clientset{}, EgressServiceClient: &egressservicefake.Clientset{}} + kubeInterface := &kube.KubeOVN{Kube: kube.Kube{KClient: fakeClient}, ANPClient: anpfake.NewSimpleClientset(), EIPClient: egressipv1fake.NewSimpleClientset(), EgressFirewallClient: &egressfirewallfake.Clientset{}, EgressServiceClient: &egressservicefake.Clientset{}, NetworkQoSClient: &networkqosfake.Clientset{}} nodeAnnotator := kube.NewNodeAnnotator(kubeInterface, existingNode.Name) watchFactory, err := factory.NewNodeWatchFactory(fakeNodeClient, nodeName) Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/util/kube.go b/go-controller/pkg/util/kube.go index f54a8019ab..becc64c95f 100644 --- a/go-controller/pkg/util/kube.go +++ b/go-controller/pkg/util/kube.go @@ -50,8 +50,10 @@ import ( egressipclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned" egressqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned" egressserviceclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned" + networkqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" routeadvertisementsclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned" userdefinednetworkclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ) // OVNClientset is a wrapper around all clientsets used by OVN-Kubernetes @@ -71,6 +73,7 @@ type OVNClientset struct { UserDefinedNetworkClient userdefinednetworkclientset.Interface RouteAdvertisementsClient routeadvertisementsclientset.Interface FRRClient frrclientset.Interface + NetworkQoSClient networkqosclientset.Interface } // OVNMasterClientset @@ -90,6 +93,7 @@ type OVNMasterClientset struct { UserDefinedNetworkClient userdefinednetworkclientset.Interface RouteAdvertisementsClient routeadvertisementsclientset.Interface FRRClient frrclientset.Interface + NetworkQoSClient networkqosclientset.Interface } // OVNKubeControllerClientset @@ -107,6 +111,7 @@ type OVNKubeControllerClientset struct { NetworkAttchDefClient networkattchmentdefclientset.Interface UserDefinedNetworkClient userdefinednetworkclientset.Interface RouteAdvertisementsClient routeadvertisementsclientset.Interface + NetworkQoSClient networkqosclientset.Interface } type OVNNodeClientset struct { @@ -134,6 +139,7 @@ type OVNClusterManagerClientset struct { UserDefinedNetworkClient userdefinednetworkclientset.Interface RouteAdvertisementsClient routeadvertisementsclientset.Interface FRRClient frrclientset.Interface + NetworkQoSClient networkqosclientset.Interface } const ( @@ -163,6 +169,7 @@ func (cs *OVNClientset) GetMasterClientset() *OVNMasterClientset { UserDefinedNetworkClient: cs.UserDefinedNetworkClient, RouteAdvertisementsClient: cs.RouteAdvertisementsClient, FRRClient: cs.FRRClient, + NetworkQoSClient: cs.NetworkQoSClient, } } @@ -181,6 +188,7 @@ func (cs *OVNMasterClientset) GetOVNKubeControllerClientset() 
*OVNKubeController NetworkAttchDefClient: cs.NetworkAttchDefClient, UserDefinedNetworkClient: cs.UserDefinedNetworkClient, RouteAdvertisementsClient: cs.RouteAdvertisementsClient, + NetworkQoSClient: cs.NetworkQoSClient, } } @@ -199,6 +207,7 @@ func (cs *OVNClientset) GetOVNKubeControllerClientset() *OVNKubeControllerClient NetworkAttchDefClient: cs.NetworkAttchDefClient, UserDefinedNetworkClient: cs.UserDefinedNetworkClient, RouteAdvertisementsClient: cs.RouteAdvertisementsClient, + NetworkQoSClient: cs.NetworkQoSClient, } } @@ -218,6 +227,7 @@ func (cs *OVNClientset) GetClusterManagerClientset() *OVNClusterManagerClientset UserDefinedNetworkClient: cs.UserDefinedNetworkClient, RouteAdvertisementsClient: cs.RouteAdvertisementsClient, FRRClient: cs.FRRClient, + NetworkQoSClient: cs.NetworkQoSClient, } } @@ -522,6 +532,11 @@ func NewOVNClientset(conf *config.KubernetesConfig) (*OVNClientset, error) { return nil, err } + networkqosClientset, err := networkqosclientset.NewForConfig(kconfig) + if err != nil { + return nil, err + } + return &OVNClientset{ KubeClient: kclientset, ANPClient: anpClientset, @@ -538,6 +553,7 @@ func NewOVNClientset(conf *config.KubernetesConfig) (*OVNClientset, error) { UserDefinedNetworkClient: userDefinedNetworkClientSet, RouteAdvertisementsClient: routeAdvertisementsClientset, FRRClient: frrClientset, + NetworkQoSClient: networkqosClientset, }, nil } diff --git a/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/rbac-ovnkube-cluster-manager.yaml b/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/rbac-ovnkube-cluster-manager.yaml index f8591de1a7..4a62d3e661 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/rbac-ovnkube-cluster-manager.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/rbac-ovnkube-cluster-manager.yaml @@ -73,6 +73,7 @@ rules: - adminpolicybasedexternalroutes - egressfirewalls - egressqoses + - networkqoses - userdefinednetworks - clusteruserdefinednetworks verbs: [ "get", "list", "watch" ] @@ -80,6 +81,7 @@ rules: resources: - egressips - egressservices/status + - networkqoses/status - userdefinednetworks - userdefinednetworks/status - clusteruserdefinednetworks diff --git a/helm/ovn-kubernetes/charts/ovnkube-master/templates/rbac-ovnkube-master.yaml b/helm/ovn-kubernetes/charts/ovnkube-master/templates/rbac-ovnkube-master.yaml index e742bbb5d4..7474c69f8f 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-master/templates/rbac-ovnkube-master.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-master/templates/rbac-ovnkube-master.yaml @@ -59,6 +59,7 @@ rules: resources: - namespaces - nodes + - nodes/status - pods - services - endpoints @@ -85,6 +86,7 @@ rules: - adminpolicybasedexternalroutes - userdefinednetworks - clusteruserdefinednetworks + - networkqoses verbs: [ "get", "list", "watch" ] - apiGroups: ["k8s.cni.cncf.io"] resources: @@ -109,6 +111,7 @@ rules: - egressfirewalls/status - egressips - egressqoses + - networkqoses - egressservices/status - adminpolicybasedexternalroutes/status - egressqoses/status @@ -117,6 +120,7 @@ rules: - clusteruserdefinednetworks - clusteruserdefinednetworks/status - clusteruserdefinednetworks/finalizers + - networkqoses/status verbs: [ "patch", "update" ] - apiGroups: [""] resources: diff --git a/helm/ovn-kubernetes/templates/rbac-ovnkube-node.yaml b/helm/ovn-kubernetes/templates/rbac-ovnkube-node.yaml index 9f201060ec..850df1b518 100644 --- a/helm/ovn-kubernetes/templates/rbac-ovnkube-node.yaml +++ b/helm/ovn-kubernetes/templates/rbac-ovnkube-node.yaml @@ 
-162,6 +162,7 @@ rules: - egressfirewalls/status - adminpolicybasedexternalroutes/status - egressqoses/status + - networkqoses/status verbs: [ "patch", "update" ] - apiGroups: ["policy.networking.k8s.io"] resources: @@ -183,6 +184,7 @@ rules: - adminpolicybasedexternalroutes - userdefinednetworks - clusteruserdefinednetworks + - networkqoses verbs: [ "get", "list", "watch" ] {{- if eq (hasKey .Values.global "enableOvnKubeIdentity" | ternary .Values.global.enableOvnKubeIdentity true) true }} - apiGroups: ["certificates.k8s.io"] From c4271a25d6e021fd11b559d2a472027048c34de2 Mon Sep 17 00:00:00 2001 From: Flavio Fernandes Date: Mon, 8 Jul 2024 22:38:52 +0000 Subject: [PATCH 06/18] Implement NetworkQoS controller This commit implements Network QoS controller, based on the enhancement https://github.com/ovn-org/ovn-kubernetes/pull/4366 Signed-off-by: Flavio Fernandes Signed-off-by: Xiaobin Qu (cherry picked from commit 1efaf29b06092e20e4fbe1d01c43aa9aba68fa1a) --- .../status_manager/networkqos_manager.go | 83 ++ .../status_manager/status_manager.go | 11 + .../status_manager/status_manager_test.go | 160 +++ go-controller/pkg/config/config.go | 1 + .../controllermanager/controller_manager.go | 1 + go-controller/pkg/factory/factory.go | 17 +- go-controller/pkg/factory/factory_test.go | 12 +- .../pkg/libovsdb/ops/db_object_types.go | 16 + go-controller/pkg/libovsdb/ops/qos.go | 36 +- .../pkg/ovn/base_network_controller.go | 23 + ...ase_secondary_layer2_network_controller.go | 30 + .../pkg/ovn/controller/network_qos/metrics.go | 102 ++ .../ovn/controller/network_qos/network_qos.go | 327 +++++++ .../network_qos/network_qos_controller.go | 452 +++++++++ .../network_qos/network_qos_namespace.go | 135 +++ .../network_qos/network_qos_node.go | 160 +++ .../network_qos/network_qos_ovnnb.go | 280 ++++++ .../controller/network_qos/network_qos_pod.go | 198 ++++ .../network_qos/network_qos_test.go | 916 ++++++++++++++++++ .../pkg/ovn/controller/network_qos/repair.go | 77 ++ .../pkg/ovn/controller/network_qos/types.go | 392 ++++++++ .../pkg/ovn/controller/network_qos/utils.go | 84 ++ .../pkg/ovn/default_network_controller.go | 13 + .../secondary_layer3_network_controller.go | 14 + go-controller/pkg/types/resource_status.go | 1 + go-controller/pkg/util/fake_client.go | 6 + go-controller/pkg/util/kube.go | 1 - 27 files changed, 3540 insertions(+), 8 deletions(-) create mode 100644 go-controller/pkg/clustermanager/status_manager/networkqos_manager.go create mode 100644 go-controller/pkg/ovn/controller/network_qos/metrics.go create mode 100644 go-controller/pkg/ovn/controller/network_qos/network_qos.go create mode 100644 go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go create mode 100644 go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go create mode 100644 go-controller/pkg/ovn/controller/network_qos/network_qos_node.go create mode 100644 go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go create mode 100644 go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go create mode 100644 go-controller/pkg/ovn/controller/network_qos/network_qos_test.go create mode 100644 go-controller/pkg/ovn/controller/network_qos/repair.go create mode 100644 go-controller/pkg/ovn/controller/network_qos/types.go create mode 100644 go-controller/pkg/ovn/controller/network_qos/utils.go diff --git a/go-controller/pkg/clustermanager/status_manager/networkqos_manager.go b/go-controller/pkg/clustermanager/status_manager/networkqos_manager.go new file mode 100644 index 
0000000000..ef63dc0288 --- /dev/null +++ b/go-controller/pkg/clustermanager/status_manager/networkqos_manager.go @@ -0,0 +1,83 @@ +package status_manager + +import ( + "context" + "strings" + + networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + networkqosapply "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1" + networkqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" + networkqoslisters "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type networkQoSManager struct { + lister networkqoslisters.NetworkQoSLister + client networkqosclientset.Interface +} + +func newNetworkQoSManager(lister networkqoslisters.NetworkQoSLister, client networkqosclientset.Interface) *networkQoSManager { + return &networkQoSManager{ + lister: lister, + client: client, + } +} + +//lint:ignore U1000 generic interfaces throw false-positives https://github.com/dominikh/go-tools/issues/1440 +func (m *networkQoSManager) get(namespace, name string) (*networkqosapi.NetworkQoS, error) { + return m.lister.NetworkQoSes(namespace).Get(name) +} + +//lint:ignore U1000 generic interfaces throw false-positives +func (m *networkQoSManager) getMessages(networkQoS *networkqosapi.NetworkQoS) []string { + var messages []string + for _, condition := range networkQoS.Status.Conditions { + messages = append(messages, condition.Message) + } + return messages +} + +//lint:ignore U1000 generic interfaces throw false-positives +func (m *networkQoSManager) updateStatus(networkQoS *networkqosapi.NetworkQoS, applyOpts *metav1.ApplyOptions, + applyEmptyOrFailed bool) error { + if networkQoS == nil { + return nil + } + newStatus := "NetworkQoS Destinations applied" + for _, condition := range networkQoS.Status.Conditions { + if strings.Contains(condition.Message, types.NetworkQoSErrorMsg) { + newStatus = types.NetworkQoSErrorMsg + break + } + } + if applyEmptyOrFailed && newStatus != types.NetworkQoSErrorMsg { + newStatus = "" + } + + if networkQoS.Status.Status == newStatus { + // already set to the same value + return nil + } + + applyStatus := networkqosapply.Status() + if newStatus != "" { + applyStatus.WithStatus(newStatus) + } + + applyObj := networkqosapply.NetworkQoS(networkQoS.Name, networkQoS.Namespace). + WithStatus(applyStatus) + + _, err := m.client.K8sV1alpha1().NetworkQoSes(networkQoS.Namespace).ApplyStatus(context.TODO(), applyObj, *applyOpts) + return err +} + +//lint:ignore U1000 generic interfaces throw false-positives +func (m *networkQoSManager) cleanupStatus(networkQoS *networkqosapi.NetworkQoS, applyOpts *metav1.ApplyOptions) error { + applyObj := networkqosapply.NetworkQoS(networkQoS.Name, networkQoS.Namespace). 
+ WithStatus(networkqosapply.Status()) + + _, err := m.client.K8sV1alpha1().NetworkQoSes(networkQoS.Namespace).ApplyStatus(context.TODO(), applyObj, *applyOpts) + return err +} diff --git a/go-controller/pkg/clustermanager/status_manager/status_manager.go b/go-controller/pkg/clustermanager/status_manager/status_manager.go index 224dc566ff..e770b054ff 100644 --- a/go-controller/pkg/clustermanager/status_manager/status_manager.go +++ b/go-controller/pkg/clustermanager/status_manager/status_manager.go @@ -20,6 +20,7 @@ import ( adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" egressfirewallapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" egressqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -203,6 +204,16 @@ func NewStatusManager(wf *factory.WatchFactory, ovnClient *util.OVNClusterManage ) sm.typedManagers["egressqoses"] = egressQoSManager } + if config.OVNKubernetesFeature.EnableNetworkQoS { + networkQoSManager := newStatusManager[networkqosapi.NetworkQoS]( + "networkqoses_statusmanager", + wf.NetworkQoSInformer().Informer(), + wf.NetworkQoSInformer().Lister().List, + newNetworkQoSManager(wf.NetworkQoSInformer().Lister(), ovnClient.NetworkQoSClient), + sm.withZonesRLock, + ) + sm.typedManagers["networkqoses"] = networkQoSManager + } return sm } diff --git a/go-controller/pkg/clustermanager/status_manager/status_manager_test.go b/go-controller/pkg/clustermanager/status_manager/status_manager_test.go index 56fea90a79..fae419976a 100644 --- a/go-controller/pkg/clustermanager/status_manager/status_manager_test.go +++ b/go-controller/pkg/clustermanager/status_manager/status_manager_test.go @@ -21,12 +21,14 @@ import ( adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" egressfirewallapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" egressqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + networkingv1 "k8s.io/api/networking/v1" ) func getNodeWithZone(nodeName, zoneName string) *corev1.Node { @@ -203,6 +205,72 @@ func checkEmptyEQStatusConsistently(egressQoS *egressqosapi.EgressQoS, fakeClien }).Should(BeTrue(), "expected Status to be consistently empty") } +func newNetworkQoS(namespace string) *networkqosapi.NetworkQoS { + return &networkqosapi.NetworkQoS{ + ObjectMeta: util.NewObjectMeta("default", namespace), + Spec: networkqosapi.Spec{ + NetworkAttachmentRefs: []v1.ObjectReference{ + { + Kind: "NetworkAttachmentDefinition", + Namespace: "default", + Name: "stream", + }, + }, + Priority: 100, + Egress: []networkqosapi.Rule{ + { + DSCP: 60, + Classifier: networkqosapi.Classifier{ + To: []networkqosapi.Destination{ + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "1.2.3.4/32", + }, + }, + }, + }, + Bandwidth: networkqosapi.Bandwidth{ + Rate: 100, + Burst: 1000, + }, + }, + }, + }, + } +} + +func updateNetworkQoSStatus(networkQoS *networkqosapi.NetworkQoS, status *networkqosapi.Status, + fakeClient *util.OVNClusterManagerClientset) { + networkQoS.Status = *status + _, err := fakeClient.NetworkQoSClient.K8sV1alpha1().NetworkQoSes(networkQoS.Namespace). + Update(context.TODO(), networkQoS, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) +} + +func checkNQStatusEventually(networkQoS *networkqosapi.NetworkQoS, expectFailure bool, expectEmpty bool, fakeClient *util.OVNClusterManagerClientset) { + Eventually(func() bool { + eq, err := fakeClient.NetworkQoSClient.K8sV1alpha1().NetworkQoSes(networkQoS.Namespace). + Get(context.TODO(), networkQoS.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + if expectFailure { + return strings.Contains(eq.Status.Status, types.NetworkQoSErrorMsg) + } else if expectEmpty { + return eq.Status.Status == "" + } else { + return strings.Contains(eq.Status.Status, "applied") + } + }).Should(BeTrue(), fmt.Sprintf("expected network QoS status with expectFailure=%v expectEmpty=%v", expectFailure, expectEmpty)) +} + +func checkEmptyNQStatusConsistently(networkQoS *networkqosapi.NetworkQoS, fakeClient *util.OVNClusterManagerClientset) { + Consistently(func() bool { + ef, err := fakeClient.NetworkQoSClient.K8sV1alpha1().NetworkQoSes(networkQoS.Namespace). 
+ Get(context.TODO(), networkQoS.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return ef.Status.Status == "" + }).Should(BeTrue(), "expected Status to be consistently empty") +} + var _ = Describe("Cluster Manager Status Manager", func() { var ( statusManager *StatusManager @@ -505,4 +573,96 @@ var _ = Describe("Cluster Manager Status Manager", func() { return atomic.LoadUint32(&banpWerePatched) }).Should(Equal(uint32(2))) }) + + It("updates NetworkQoS status with 1 zone", func() { + config.OVNKubernetesFeature.EnableNetworkQoS = true + zones := sets.New[string]("zone1") + namespace1 := util.NewNamespace(namespace1Name) + networkQoS := newNetworkQoS(namespace1.Name) + start(zones, namespace1, networkQoS) + updateNetworkQoSStatus(networkQoS, &networkqosapi.Status{ + Conditions: []metav1.Condition{{ + Type: "Ready-In-Zone-zone1", + Status: metav1.ConditionTrue, + Reason: "SetupSucceeded", + Message: "NetworkQoS Destinations applied", + }}, + }, fakeClient) + + checkNQStatusEventually(networkQoS, false, false, fakeClient) + }) + + It("updates NetworkQoS status with 2 zones", func() { + config.OVNKubernetesFeature.EnableNetworkQoS = true + zones := sets.New[string]("zone1", "zone2") + namespace1 := util.NewNamespace(namespace1Name) + networkQoS := newNetworkQoS(namespace1.Name) + start(zones, namespace1, networkQoS) + + updateNetworkQoSStatus(networkQoS, &networkqosapi.Status{ + Conditions: []metav1.Condition{{ + Type: "Ready-In-Zone-zone1", + Status: metav1.ConditionTrue, + Reason: "SetupSucceeded", + Message: "NetworkQoS Destinations applied", + }}, + }, fakeClient) + + checkEmptyNQStatusConsistently(networkQoS, fakeClient) + + updateNetworkQoSStatus(networkQoS, &networkqosapi.Status{ + Conditions: []metav1.Condition{{ + Type: "Ready-In-Zone-zone1", + Status: metav1.ConditionTrue, + Reason: "SetupSucceeded", + Message: "NetworkQoS Destinations applied", + }, { + Type: "Ready-In-Zone-zone2", + Status: metav1.ConditionTrue, + Reason: "SetupSucceeded", + Message: "NetworkQoS Destinations applied", + }}, + }, fakeClient) + checkNQStatusEventually(networkQoS, false, false, fakeClient) + + }) + + It("updates NetworkQoS status with UnknownZone", func() { + config.OVNKubernetesFeature.EnableNetworkQoS = true + zones := sets.New[string]("zone1", zone_tracker.UnknownZone) + namespace1 := util.NewNamespace(namespace1Name) + networkQoS := newNetworkQoS(namespace1.Name) + start(zones, namespace1, networkQoS) + + // no matter how many messages are in the status, it won't be updated while UnknownZone is present + updateNetworkQoSStatus(networkQoS, &networkqosapi.Status{ + Conditions: []metav1.Condition{{ + Type: "Ready-In-Zone-zone1", + Status: metav1.ConditionTrue, + Reason: "SetupSucceeded", + Message: "NetworkQoS Destinations applied", + }}, + }, fakeClient) + checkEmptyNQStatusConsistently(networkQoS, fakeClient) + + // when UnknownZone is removed, updates will be handled, but status from the new zone is not reported yet + statusManager.onZoneUpdate(sets.New[string]("zone1", "zone2")) + checkEmptyNQStatusConsistently(networkQoS, fakeClient) + // when new zone status is reported, status will be set + updateNetworkQoSStatus(networkQoS, &networkqosapi.Status{ + Conditions: []metav1.Condition{{ + Type: "Ready-In-Zone-zone1", + Status: metav1.ConditionTrue, + Reason: "SetupSucceeded", + Message: "NetworkQoS Destinations applied", + }, { + Type: "Ready-In-Zone-zone2", + Status: metav1.ConditionTrue, + Reason: "SetupSucceeded", + Message: "NetworkQoS Destinations applied", + }}, + }, 
fakeClient) + checkNQStatusEventually(networkQoS, false, false, fakeClient) + }) + }) diff --git a/go-controller/pkg/config/config.go b/go-controller/pkg/config/config.go index e7c8a2f849..c7df666cbc 100644 --- a/go-controller/pkg/config/config.go +++ b/go-controller/pkg/config/config.go @@ -432,6 +432,7 @@ type OVNKubernetesFeatureConfig struct { EnableDNSNameResolver bool `gcfg:"enable-dns-name-resolver"` EnableServiceTemplateSupport bool `gcfg:"enable-svc-template-support"` EnableObservability bool `gcfg:"enable-observability"` + EnableNetworkQoS bool `gcfg:"enable-network-qos"` } // GatewayMode holds the node gateway mode diff --git a/go-controller/pkg/controllermanager/controller_manager.go b/go-controller/pkg/controllermanager/controller_manager.go index 5d0c1fb7a4..107e0ed7c3 100644 --- a/go-controller/pkg/controllermanager/controller_manager.go +++ b/go-controller/pkg/controllermanager/controller_manager.go @@ -210,6 +210,7 @@ func NewControllerManager(ovnClient *util.OVNClientset, wf *factory.WatchFactory APBRouteClient: ovnClient.AdminPolicyRouteClient, EgressQoSClient: ovnClient.EgressQoSClient, IPAMClaimsClient: ovnClient.IPAMClaimsClient, + NetworkQoSClient: ovnClient.NetworkQoSClient, }, stopChan: stopCh, watchFactory: wf, diff --git a/go-controller/pkg/factory/factory.go b/go-controller/pkg/factory/factory.go index b3ab80f8d0..ee8733ba13 100644 --- a/go-controller/pkg/factory/factory.go +++ b/go-controller/pkg/factory/factory.go @@ -151,6 +151,7 @@ func (wf *WatchFactory) ShallowClone() *WatchFactory { udnFactory: wf.udnFactory, raFactory: wf.raFactory, frrFactory: wf.frrFactory, + networkQoSFactory: wf.networkQoSFactory, informers: wf.informers, stopChan: wf.stopChan, @@ -514,7 +515,8 @@ func NewOVNKubeControllerWatchFactory(ovnClientset *util.OVNKubeControllerClient } if config.OVNKubernetesFeature.EnableNetworkQoS { - wf.informers[NetworkQoSType], err = newInformer(NetworkQoSType, wf.networkQoSFactory.K8s().V1().NetworkQoSes().Informer()) + wf.informers[NetworkQoSType], err = newQueuedInformer(eventQueueSize, NetworkQoSType, + wf.networkQoSFactory.K8s().V1().NetworkQoSes().Informer(), wf.stopChan, minNumEventQueues) if err != nil { return nil, err } @@ -627,6 +629,15 @@ func (wf *WatchFactory) Start() error { } } + if config.OVNKubernetesFeature.EnableNetworkQoS && wf.networkQoSFactory != nil { + wf.networkQoSFactory.Start(wf.stopChan) + for oType, synced := range waitForCacheSyncWithTimeout(wf.networkQoSFactory, wf.stopChan) { + if !synced { + return fmt.Errorf("error in syncing cache for %v informer", oType) + } + } + } + if util.IsNetworkSegmentationSupportEnabled() && wf.udnFactory != nil { wf.udnFactory.Start(wf.stopChan) for oType, synced := range waitForCacheSyncWithTimeout(wf.udnFactory, wf.stopChan) { @@ -1181,6 +1192,10 @@ func getObjectMeta(objType reflect.Type, obj interface{}) (*metav1.ObjectMeta, e if cudn, ok := obj.(*userdefinednetworkapi.ClusterUserDefinedNetwork); ok { return &cudn.ObjectMeta, nil } + case NetworkQoSType: + if networkQoS, ok := obj.(*networkqosapi.NetworkQoS); ok { + return &networkQoS.ObjectMeta, nil + } } return nil, fmt.Errorf("cannot get ObjectMeta from type %v", objType) diff --git a/go-controller/pkg/factory/factory_test.go b/go-controller/pkg/factory/factory_test.go index ad8c9472e1..af0e8ddb76 100644 --- a/go-controller/pkg/factory/factory_test.go +++ b/go-controller/pkg/factory/factory_test.go @@ -230,11 +230,17 @@ func newNetworkQoS(name, namespace string) *networkqos.NetworkQoS { return &networkqos.NetworkQoS{ ObjectMeta: 
newObjectMeta(name, namespace), Spec: networkqos.Spec{ - NetworkAttachmentName: "default/stream", + NetworkAttachmentRefs: []v1.ObjectReference{ + { + Kind: "NetworkAttachmentDefinition", + Namespace: "default", + Name: "stream", + }, + }, + Priority: 100, Egress: []networkqos.Rule{ { - Priority: 100, - DSCP: 50, + DSCP: 50, Classifier: networkqos.Classifier{ To: []networkqos.Destination{ { diff --git a/go-controller/pkg/libovsdb/ops/db_object_types.go b/go-controller/pkg/libovsdb/ops/db_object_types.go index bb2afeea11..62842b8e05 100644 --- a/go-controller/pkg/libovsdb/ops/db_object_types.go +++ b/go-controller/pkg/libovsdb/ops/db_object_types.go @@ -19,6 +19,7 @@ const ( EgressQoSOwnerType ownerType = "EgressQoS" AdminNetworkPolicyOwnerType ownerType = "AdminNetworkPolicy" BaselineAdminNetworkPolicyOwnerType ownerType = "BaselineAdminNetworkPolicy" + NetworkQoSOwnerType ownerType = "NetworkQoS" // NetworkPolicyOwnerType is deprecated for address sets, should only be used for sync. // New owner of network policy address sets, is PodSelectorOwnerType. NetworkPolicyOwnerType ownerType = "NetworkPolicy" @@ -141,6 +142,15 @@ var AddressSetUDNEnabledService = newObjectIDsType(addressSet, UDNEnabledService IPFamilyKey, }) +var AddressSetNetworkQoS = newObjectIDsType(addressSet, NetworkQoSOwnerType, []ExternalIDKey{ + // nqos namespace:name + ObjectNameKey, + // rule index + RuleIndex, + IpBlockIndexKey, + IPFamilyKey, +}) + var ACLAdminNetworkPolicy = newObjectIDsType(acl, AdminNetworkPolicyOwnerType, []ExternalIDKey{ // anp name ObjectNameKey, @@ -344,3 +354,9 @@ var QoSRuleEgressIP = newObjectIDsType(qos, EgressIPOwnerType, []ExternalIDKey{ // the IP Family for this policy, ip4 or ip6 or ip(dualstack) IPFamilyKey, }) + +var NetworkQoS = newObjectIDsType(qos, NetworkQoSOwnerType, []ExternalIDKey{ + ObjectNameKey, + // rule index + RuleIndex, +}) diff --git a/go-controller/pkg/libovsdb/ops/qos.go b/go-controller/pkg/libovsdb/ops/qos.go index d78be6b1e2..d991c4c007 100644 --- a/go-controller/pkg/libovsdb/ops/qos.go +++ b/go-controller/pkg/libovsdb/ops/qos.go @@ -10,6 +10,11 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) +func getQoSMutableFields(qos *nbdb.QoS) []interface{} { + return []interface{}{&qos.Action, &qos.Bandwidth, &qos.Direction, &qos.ExternalIDs, + &qos.Match, &qos.Priority} +} + type QoSPredicate func(*nbdb.QoS) bool // FindQoSesWithPredicate looks up QoSes from the cache based on a @@ -30,7 +35,7 @@ func CreateOrUpdateQoSesOps(nbClient libovsdbclient.Client, ops []ovsdb.Operatio qos := qoses[i] opModel := operationModel{ Model: qos, - OnModelUpdates: []interface{}{}, // update all fields + OnModelUpdates: getQoSMutableFields(qos), ErrNotFound: false, BulkOp: false, } @@ -48,7 +53,7 @@ func UpdateQoSesOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, qoses qos := qoses[i] opModel := operationModel{ Model: qos, - OnModelUpdates: []interface{}{}, // update all fields + OnModelUpdates: getQoSMutableFields(qos), ErrNotFound: true, BulkOp: false, } @@ -111,10 +116,35 @@ func RemoveQoSesFromLogicalSwitchOps(nbClient libovsdbclient.Client, ops []ovsdb opModels := operationModel{ Model: sw, OnModelMutations: []interface{}{&sw.QOSRules}, - ErrNotFound: true, + ErrNotFound: false, BulkOp: false, } modelClient := newModelClient(nbClient) return modelClient.DeleteOps(ops, opModels) } + +// DeleteQoSesWithPredicateOps returns the ops to delete QoSes based on a given predicate +func DeleteQoSesWithPredicateOps(nbClient libovsdbclient.Client, ops 
[]ovsdb.Operation, p QoSPredicate) ([]ovsdb.Operation, error) {
+	deleted := []*nbdb.QoS{}
+	opModel := operationModel{
+		ModelPredicate: p,
+		ExistingResult: &deleted,
+		ErrNotFound:    false,
+		BulkOp:         true,
+	}
+
+	m := newModelClient(nbClient)
+	return m.DeleteOps(ops, opModel)
+}
+
+// DeleteQoSesWithPredicate looks up QoSes from the cache based on
+// a given predicate and deletes them
+func DeleteQoSesWithPredicate(nbClient libovsdbclient.Client, p QoSPredicate) error {
+	ops, err := DeleteQoSesWithPredicateOps(nbClient, nil, p)
+	if err != nil {
+		return err
+	}
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go
index e5d1c21024..6494fe932f 100644
--- a/go-controller/pkg/ovn/base_network_controller.go
+++ b/go-controller/pkg/ovn/base_network_controller.go
@@ -36,6 +36,7 @@ import (
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability"
 	addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set"
+	nqoscontroller "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/network_qos"
 	lsm "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/logical_switch_manager"
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/routeimport"
 	zoneic "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/zone_interconnect"
@@ -181,6 +182,9 @@ type BaseNetworkController struct {
 	observManager *observability.Manager
 	routeImportManager routeimport.Manager
+
+	// Controller used for programming OVN for Network QoS
+	nqosController *nqoscontroller.Controller
 }

 func (oc *BaseNetworkController) reconcile(netInfo util.NetInfo, setNodeFailed func(string)) error {
@@ -1056,6 +1060,25 @@ func (bnc *BaseNetworkController) DeleteResourceCommon(objType reflect.Type, obj
 	return nil
 }

+func (bnc *BaseNetworkController) newNetworkQoSController() error {
+	var err error
+	bnc.nqosController, err = nqoscontroller.NewController(
+		bnc.controllerName,
+		bnc.ReconcilableNetInfo.GetNetInfo(),
+		bnc.nbClient,
+		bnc.recorder,
+		bnc.kube.NetworkQoSClient,
+		bnc.watchFactory.NetworkQoSInformer(),
+		bnc.watchFactory.NamespaceCoreInformer(),
+		bnc.watchFactory.PodCoreInformer(),
+		bnc.watchFactory.NodeCoreInformer(),
+		bnc.addressSetFactory,
+		bnc.isPodScheduledinLocalZone,
+		bnc.zone,
+	)
+	return err
+}
+
 func initLoadBalancerGroups(nbClient libovsdbclient.Client, netInfo util.NetInfo) (
 	clusterLoadBalancerGroupUUID, switchLoadBalancerGroupUUID, routerLoadBalancerGroupUUID string, err error) {
diff --git a/go-controller/pkg/ovn/base_secondary_layer2_network_controller.go b/go-controller/pkg/ovn/base_secondary_layer2_network_controller.go
index 872a03780c..95ef04d1af 100644
--- a/go-controller/pkg/ovn/base_secondary_layer2_network_controller.go
+++ b/go-controller/pkg/ovn/base_secondary_layer2_network_controller.go
@@ -73,6 +73,22 @@ func (oc *BaseSecondaryLayer2NetworkController) cleanup() error {
 		return err
 	}

+	ops, err = libovsdbops.DeleteQoSesWithPredicateOps(oc.nbClient, ops,
+		func(item *nbdb.QoS) bool {
+			return item.ExternalIDs[types.NetworkExternalID] == netName
+		})
+	if err != nil {
+		return fmt.Errorf("failed to get ops for deleting QoSes of network %s: %v", netName, err)
+	}
+
+	ops, err = libovsdbops.DeleteAddressSetsWithPredicateOps(oc.nbClient, ops,
+		func(item *nbdb.AddressSet) bool {
+			return item.ExternalIDs[types.NetworkExternalID] == netName
+		})
+	if err != nil {
+		return fmt.Errorf("failed to get ops for deleting address sets of network %s: %v", netName, err)
+	}
+
 	_, err = libovsdbops.TransactAndCheck(oc.nbClient, ops)
 	if err != nil {
 		return fmt.Errorf("failed to deleting switches of network %s: %v", netName, err)
@@ -121,6 +137,20 @@ func (oc *BaseSecondaryLayer2NetworkController) run() error {
 		}
 	}

+	// start NetworkQoS controller if feature is enabled
+	if config.OVNKubernetesFeature.EnableNetworkQoS {
+		err := oc.newNetworkQoSController()
+		if err != nil {
+			return fmt.Errorf("unable to create network qos controller, err: %w", err)
+		}
+		oc.wg.Add(1)
+		go func() {
+			defer oc.wg.Done()
+			// Until we have scale issues, spawn only one worker thread
+			oc.nqosController.Run(1, oc.stopChan)
+		}()
+	}
+
 	// Add ourselves to the route import manager
 	if oc.routeImportManager != nil && config.Gateway.Mode == config.GatewayModeShared {
 		err := oc.routeImportManager.AddNetwork(oc.GetNetInfo())
item.ExternalIDs[types.NetworkExternalID] == netName
+		})
+	if err != nil {
+		return fmt.Errorf("failed to get ops for deleting address sets of network %s: %v", netName, err)
+	}
+
 	_, err = libovsdbops.TransactAndCheck(oc.nbClient, ops)
 	if err != nil {
 		return fmt.Errorf("failed to deleting switches of network %s: %v", netName, err)
 	}
@@ -121,6 +137,20 @@ func (oc *BaseSecondaryLayer2NetworkController) run() error {
 		}
 	}
 
+	// start NetworkQoS controller if feature is enabled
+	if config.OVNKubernetesFeature.EnableNetworkQoS {
+		err := oc.newNetworkQoSController()
+		if err != nil {
+			return fmt.Errorf("unable to create network qos controller, err: %w", err)
+		}
+		oc.wg.Add(1)
+		go func() {
+			defer oc.wg.Done()
+			// run a single worker thread until scale requires more
+			oc.nqosController.Run(1, oc.stopChan)
+		}()
+	}
+
 	// Add ourselves to the route import manager
 	if oc.routeImportManager != nil && config.Gateway.Mode == config.GatewayModeShared {
 		err := oc.routeImportManager.AddNetwork(oc.GetNetInfo())
diff --git a/go-controller/pkg/ovn/controller/network_qos/metrics.go b/go-controller/pkg/ovn/controller/network_qos/metrics.go
new file mode 100644
index 0000000000..05aada1eb8
--- /dev/null
+++ b/go-controller/pkg/ovn/controller/network_qos/metrics.go
@@ -0,0 +1,102 @@
+package networkqos
+
+import (
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// Metrics to be exposed
+var (
+	nqosCount = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: metrics.MetricOvnkubeNamespace,
+			Subsystem: metrics.MetricOvnkubeSubsystemController,
+			Name:      "num_network_qoses",
+			Help:      "The total number of network qoses in the cluster",
+		},
+		[]string{"network"},
+	)
+
+	nqosOvnOperationDuration = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: metrics.MetricOvnkubeNamespace,
+			Subsystem: metrics.MetricOvnkubeSubsystemController,
+			Name:      "nqos_ovn_operation_duration_ms",
+			Help:      "Time spent on an OVN operation for NetworkQoS",
+			Buckets:   prometheus.ExponentialBuckets(.1, 2, 15),
+		},
+		[]string{"operation"},
+	)
+
+	nqosReconcileDuration = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: metrics.MetricOvnkubeNamespace,
+			Subsystem: metrics.MetricOvnkubeSubsystemController,
+			Name:      "nqos_reconcile_duration_ms",
+			Help:      "Time spent on reconciling a NetworkQoS event",
+			Buckets:   prometheus.ExponentialBuckets(.1, 2, 15),
+		},
+		[]string{"network"},
+	)
+
+	nqosPodReconcileDuration = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: metrics.MetricOvnkubeNamespace,
+			Subsystem: metrics.MetricOvnkubeSubsystemController,
+			Name:      "nqos_pod_reconcile_duration_ms",
+			Help:      "Time spent on reconciling a Pod event",
+			Buckets:   prometheus.ExponentialBuckets(.1, 2, 15),
+		},
+		[]string{"network"},
+	)
+
+	nqosNamespaceReconcileDuration = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: metrics.MetricOvnkubeNamespace,
+			Subsystem: metrics.MetricOvnkubeSubsystemController,
+			Name:      "nqos_ns_reconcile_duration_ms",
+			Help:      "Time spent on reconciling a Namespace change for all Pods related to NetworkQoSes",
+			Buckets:   prometheus.ExponentialBuckets(.1, 2, 15),
+		},
+		[]string{"network"},
+	)
+)
+
+func init() {
+	prometheus.MustRegister(
+		nqosCount,
+		nqosOvnOperationDuration,
+		nqosReconcileDuration,
+		nqosPodReconcileDuration,
+		nqosNamespaceReconcileDuration,
+	)
+}
+
+func (c *Controller) teardownMetricsCollector() {
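+	// Only the per-network gauge is unregistered here; the duration histograms
+	// are shared across networks and stay registered for the process lifetime.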
prometheus.Unregister(nqosCount) +} + +// records the number of networkqos. +func updateNetworkQoSCount(network string, count int) { + nqosCount.WithLabelValues(network).Set(float64(count)) +} + +// records the reconciliation duration for networkqos +func recordNetworkQoSReconcileDuration(network string, duration int64) { + nqosReconcileDuration.WithLabelValues(network).Observe(float64(duration)) +} + +// records time spent on adding/removing a pod to/from networkqos rules +func recordPodReconcileDuration(network string, duration int64) { + nqosPodReconcileDuration.WithLabelValues(network).Observe(float64(duration)) +} + +// records time spent on handling a namespace event which is involved in networkqos +func recordNamespaceReconcileDuration(network string, duration int64) { + nqosNamespaceReconcileDuration.WithLabelValues(network).Observe(float64(duration)) +} + +// records time spent on an ovn operation +func recordOvnOperationDuration(operationType string, duration int64) { + nqosOvnOperationDuration.WithLabelValues(operationType).Observe(float64(duration)) +} diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos.go b/go-controller/pkg/ovn/controller/network_qos/network_qos.go new file mode 100644 index 0000000000..4c542b5fe6 --- /dev/null +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos.go @@ -0,0 +1,327 @@ +package networkqos + +import ( + "context" + "fmt" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + metaapplyv1 "k8s.io/client-go/applyconfigurations/meta/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + + networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + nqosapiapply "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +func (c *Controller) processNextNQOSWorkItem(wg *sync.WaitGroup) bool { + wg.Add(1) + defer wg.Done() + nqosKey, quit := c.nqosQueue.Get() + if quit { + return false + } + defer c.nqosQueue.Done(nqosKey) + + err := c.syncNetworkQoS(nqosKey) + if err == nil { + c.nqosQueue.Forget(nqosKey) + return true + } + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", nqosKey, err)) + + if c.nqosQueue.NumRequeues(nqosKey) < maxRetries { + c.nqosQueue.AddRateLimited(nqosKey) + return true + } + + c.nqosQueue.Forget(nqosKey) + return true +} + +// syncNetworkQoS decides the main logic everytime +// we dequeue a key from the nqosQueue cache +func (c *Controller) syncNetworkQoS(key string) error { + startTime := time.Now() + nqosNamespace, nqosName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + klog.V(5).Infof("%s - Processing sync for Network QoS %s", c.controllerName, nqosName) + + defer func() { + klog.V(5).Infof("%s - Finished syncing Network QoS %s : %v", c.controllerName, nqosName, time.Since(startTime)) + }() + + nqos, err := c.nqosLister.NetworkQoSes(nqosNamespace).Get(nqosName) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + if nqos == nil { + klog.V(5).Infof("%s - NetworkQoS %s has gone", c.controllerName, key) + return c.nqosCache.DoWithLock(key, func(nqosKey string) error { + return c.clearNetworkQos(nqosNamespace, 
nqosName) + }) + } else { + if !c.networkManagedByMe(nqos.Spec.NetworkAttachmentRefs) { + // maybe NetworkAttachmentName has been changed from this one to other value, try cleanup anyway + return c.nqosCache.DoWithLock(key, func(nqosKey string) error { + return c.clearNetworkQos(nqosNamespace, nqosName) + }) + } + } + klog.V(5).Infof("%s - Processing NetworkQoS %s/%s", c.controllerName, nqos.Namespace, nqos.Name) + // save key to avoid racing + c.nqosCache.Store(key, nil) + // at this stage the NQOS exists in the cluster + return c.nqosCache.DoWithLock(key, func(nqosKey string) error { + if err = c.ensureNetworkQos(nqos); err != nil { + c.nqosCache.Delete(key) + // we can ignore the error if status update doesn't succeed; best effort + c.updateNQOSStatusToNotReady(nqos.Namespace, nqos.Name, "failed to enforce", err) + return err + } + recordNetworkQoSReconcileDuration(c.controllerName, time.Since(startTime).Milliseconds()) + updateNetworkQoSCount(c.controllerName, len(c.nqosCache.GetKeys())) + return nil + }) +} + +// ensureNetworkQos will handle the main reconcile logic for any given nqos's +// add/update that might be triggered either due to NQOS changes or the corresponding +// matching pod or namespace changes. +func (c *Controller) ensureNetworkQos(nqos *networkqosapi.NetworkQoS) error { + desiredNQOSState := &networkQoSState{ + name: nqos.Name, + namespace: nqos.Namespace, + } + + if len(nqos.Spec.PodSelector.MatchLabels) > 0 || len(nqos.Spec.PodSelector.MatchExpressions) > 0 { + if podSelector, err := metav1.LabelSelectorAsSelector(&nqos.Spec.PodSelector); err != nil { + c.updateNQOSStatusToNotReady(nqos.Namespace, nqos.Name, "failed to parse source pod selector", err) + return nil + } else { + desiredNQOSState.PodSelector = podSelector + } + } + + // set EgressRules to desiredNQOSState + rules := []*GressRule{} + for index, ruleSpec := range nqos.Spec.Egress { + bwRate := int(ruleSpec.Bandwidth.Rate) + bwBurst := int(ruleSpec.Bandwidth.Burst) + ruleState := &GressRule{ + Priority: getQoSRulePriority(nqos.Spec.Priority, index), + Dscp: ruleSpec.DSCP, + } + if bwRate > 0 { + ruleState.Rate = &bwRate + } + if bwBurst > 0 { + ruleState.Burst = &bwBurst + } + destStates := []*Destination{} + for _, destSpec := range ruleSpec.Classifier.To { + if destSpec.IPBlock != nil && (destSpec.PodSelector != nil || destSpec.NamespaceSelector != nil) { + c.updateNQOSStatusToNotReady(nqos.Namespace, nqos.Name, "specifying both ipBlock and podSelector/namespaceSelector is not allowed", nil) + return nil + } + destState := &Destination{} + destState.IpBlock = destSpec.IPBlock.DeepCopy() + if destSpec.NamespaceSelector != nil && (len(destSpec.NamespaceSelector.MatchLabels) > 0 || len(destSpec.NamespaceSelector.MatchExpressions) > 0) { + if selector, err := metav1.LabelSelectorAsSelector(destSpec.NamespaceSelector); err != nil { + c.updateNQOSStatusToNotReady(nqos.Namespace, nqos.Name, "failed to parse destination namespace selector", err) + return nil + } else { + destState.NamespaceSelector = selector + } + } + if destSpec.PodSelector != nil && (len(destSpec.PodSelector.MatchLabels) > 0 || len(destSpec.PodSelector.MatchExpressions) > 0) { + if selector, err := metav1.LabelSelectorAsSelector(destSpec.PodSelector); err != nil { + c.updateNQOSStatusToNotReady(nqos.Namespace, nqos.Name, "failed to parse destination pod selector", err) + return nil + } else { + destState.PodSelector = selector + } + } + destStates = append(destStates, destState) + } + ruleState.Classifier = &Classifier{ + Destinations: 
destStates, + } + if ruleSpec.Classifier.Port.Protocol != "" { + ruleState.Classifier.Protocol = protocol(ruleSpec.Classifier.Port.Protocol) + if !ruleState.Classifier.Protocol.IsValid() { + return fmt.Errorf("invalid protocol: %s, valid values are: tcp, udp, sctp", ruleSpec.Classifier.Port.Protocol) + } + } + if ruleSpec.Classifier.Port.Port > 0 { + port := int(ruleSpec.Classifier.Port.Port) + ruleState.Classifier.Port = &port + } + rules = append(rules, ruleState) + } + desiredNQOSState.EgressRules = rules + if err := desiredNQOSState.initAddressSets(c.addressSetFactory, c.controllerName); err != nil { + return err + } + if err := c.resyncPods(desiredNQOSState); err != nil { + return fmt.Errorf("failed to resync pods: %w", err) + } + // delete stale rules left from previous NetworkQoS definition, along with the address sets + if err := c.cleanupStaleOvnObjects(desiredNQOSState); err != nil { + return fmt.Errorf("failed to delete stale QoSes: %w", err) + } + c.nqosCache.Store(joinMetaNamespaceAndName(nqos.Namespace, nqos.Name), desiredNQOSState) + if e := c.updateNQOSStatusToReady(nqos.Namespace, nqos.Name); e != nil { + return fmt.Errorf("NetworkQoS %s/%s reconciled successfully but unable to patch status: %v", nqos.Namespace, nqos.Name, e) + } + return nil +} + +// clearNetworkQos will handle the logic for deleting all db objects related +// to the provided nqos which got deleted. +// uses externalIDs to figure out ownership +func (c *Controller) clearNetworkQos(nqosNamespace, nqosName string) error { + k8sFullName := joinMetaNamespaceAndName(nqosNamespace, nqosName) + ovnObjectName := joinMetaNamespaceAndName(nqosNamespace, nqosName, ":") + + klog.V(4).Infof("%s - try cleaning up networkqos %s", c.controllerName, k8sFullName) + // remove NBDB objects by NetworkQoS name + if err := c.deleteByName(ovnObjectName); err != nil { + return fmt.Errorf("failed to delete QoS rules for NetworkQoS %s: %w", k8sFullName, err) + } + c.nqosCache.Delete(k8sFullName) + updateNetworkQoSCount(c.controllerName, len(c.nqosCache.GetKeys())) + return nil +} + +const ( + conditionTypeReady = "Ready-In-Zone-" + reasonQoSSetupSuccess = "Success" + reasonQoSSetupFailed = "Failed" +) + +func (c *Controller) updateNQOSStatusToReady(namespace, name string) error { + cond := metav1.Condition{ + Type: conditionTypeReady + c.zone, + Status: metav1.ConditionTrue, + Reason: reasonQoSSetupSuccess, + Message: "NetworkQoS was applied successfully", + } + err := c.updateNQOStatusCondition(cond, namespace, name) + if err != nil { + return fmt.Errorf("failed to update the status of NetworkQoS %s/%s, err: %v", namespace, name, err) + } + klog.V(5).Infof("Patched the status of NetworkQoS %s/%s with condition type %v/%v", + namespace, name, conditionTypeReady+c.zone, metav1.ConditionTrue) + return nil +} + +func (c *Controller) updateNQOSStatusToNotReady(namespace, name, reason string, err error) { + msg := reason + if err != nil { + msg = fmt.Sprintf("NetworkQoS %s/%s - %s, error details: %v", namespace, name, reason, err) + } + cond := metav1.Condition{ + Type: conditionTypeReady + c.zone, + Status: metav1.ConditionFalse, + Reason: reasonQoSSetupFailed, + Message: msg, + } + klog.Error(msg) + err = c.updateNQOStatusCondition(cond, namespace, name) + if err != nil { + klog.Warningf("Failed to update the status of NetworkQoS %s/%s, err: %v", namespace, name, err) + } else { + klog.V(6).Infof("Patched the status of NetworkQoS %s/%s with condition type %v/%v", namespace, name, conditionTypeReady+c.zone, metav1.ConditionTrue) + } +} 
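+
+// Illustrative sketch: how a consumer of this API might wait for the
+// zone-scoped Ready condition set above. The helper name, the poll interval,
+// the 30s timeout, and the imports (context, time,
+// k8s.io/apimachinery/pkg/api/meta, k8s.io/apimachinery/pkg/util/wait) are
+// assumptions for the example, not something this controller provides:
+//
+//	func waitForReadyInZone(ctx context.Context, cs networkqosclientset.Interface, ns, name, zone string) error {
+//		return wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, true,
+//			func(ctx context.Context) (bool, error) {
+//				nqos, err := cs.K8sV1alpha1().NetworkQoSes(ns).Get(ctx, name, metav1.GetOptions{})
+//				if err != nil {
+//					return false, err
+//				}
+//				cond := meta.FindStatusCondition(nqos.Status.Conditions, "Ready-In-Zone-"+zone)
+//				return cond != nil && cond.Status == metav1.ConditionTrue, nil
+//			})
+//	}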
+ +func (c *Controller) updateNQOStatusCondition(newCondition metav1.Condition, namespace, name string) error { + nqos, err := c.nqosLister.NetworkQoSes(namespace).Get(name) + if err != nil { + return err + } + + existingCondition := meta.FindStatusCondition(nqos.Status.Conditions, newCondition.Type) + newConditionApply := &metaapplyv1.ConditionApplyConfiguration{ + Type: &newCondition.Type, + Status: &newCondition.Status, + ObservedGeneration: &newCondition.ObservedGeneration, + Reason: &newCondition.Reason, + Message: &newCondition.Message, + } + + if existingCondition == nil || existingCondition.Status != newCondition.Status { + newConditionApply.LastTransitionTime = ptr.To(metav1.NewTime(time.Now())) + } else { + newConditionApply.LastTransitionTime = &existingCondition.LastTransitionTime + } + + applyObj := nqosapiapply.NetworkQoS(name, namespace). + WithStatus(nqosapiapply.Status().WithConditions(newConditionApply)) + _, err = c.nqosClientSet.K8sV1alpha1().NetworkQoSes(namespace).ApplyStatus(context.TODO(), applyObj, metav1.ApplyOptions{FieldManager: c.zone, Force: true}) + return err +} + +func (c *Controller) resyncPods(nqosState *networkQoSState) error { + pods, err := c.nqosPodLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("failed to list pods in namespace %s: %w", nqosState.namespace, err) + } + nsCache := make(map[string]*corev1.Namespace) + for _, pod := range pods { + if pod.Spec.HostNetwork { + continue + } + ns := nsCache[pod.Namespace] + if ns == nil { + ns, err = c.nqosNamespaceLister.Get(pod.Namespace) + if err != nil { + return fmt.Errorf("failed to get namespace %s: %w", pod.Namespace, err) + } + nsCache[pod.Namespace] = ns + } + if err := c.setPodForNQOS(pod, nqosState, ns); err != nil { + return err + } + } + return nil +} + +func (c *Controller) networkManagedByMe(nadRefs []corev1.ObjectReference) bool { + if len(nadRefs) == 0 { + return c.IsDefault() + } + for _, nadRef := range nadRefs { + nadKey := joinMetaNamespaceAndName(nadRef.Namespace, nadRef.Name) + if ((nadKey == "" || nadKey == types.DefaultNetworkName) && c.IsDefault()) || + (!c.IsDefault() && c.HasNAD(nadKey)) { + return true + } + klog.V(6).Infof("Net-attach-def %s is not managed by controller %s ", nadKey, c.controllerName) + } + return false +} + +func (c *Controller) getLogicalSwitchName(nodeName string) string { + switch { + case c.TopologyType() == types.Layer2Topology: + return c.GetNetworkScopedSwitchName(types.OVNLayer2Switch) + case c.TopologyType() == types.LocalnetTopology: + return c.GetNetworkScopedSwitchName(types.OVNLocalnetSwitch) + case !c.IsSecondary() || c.TopologyType() == types.Layer3Topology: + return c.GetNetworkScopedSwitchName(nodeName) + default: + return "" + } +} diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go new file mode 100644 index 0000000000..61f928b3d0 --- /dev/null +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go @@ -0,0 +1,452 @@ +package networkqos + +import ( + "fmt" + "reflect" + "sync" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + corev1informers "k8s.io/client-go/informers/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + libovsdbclient 
"github.com/ovn-org/libovsdb/client" + + networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + networkqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" + networkqosinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1" + networkqoslister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/syncmap" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +const ( + // maxRetries is the number of times a object will be retried before it is dropped out of the queue. + // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the + // sequence of delays between successive queuings of an object. + // + // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s + maxRetries = 15 +) + +// Controller holds the fields required for NQOS controller +// taken from k8s controller guidelines +type Controller struct { + // name of the controller that starts the NQOS controller + // (values are default-network-controller, secondary-network-controller etc..) + controllerName string + util.NetInfo + nqosClientSet networkqosclientset.Interface + + // libovsdb northbound client interface + nbClient libovsdbclient.Client + eventRecorder record.EventRecorder + // An address set factory that creates address sets + addressSetFactory addressset.AddressSetFactory + // pass in the isPodScheduledinLocalZone util from bnc - used only to determine + // what zones the pods are in. + // isPodScheduledinLocalZone returns whether the provided pod is in a zone local to the zone controller + // So if pod is not scheduled yet it is considered remote. 
Also if we can't fetch the node from kapi and determine the zone,
+	// we consider it remote - this is OK for this controller, as this variable is only used to
+	// determine whether we need to add the pod's port to the port group or not - future updates
+	// should take care of reconciling the state of the cluster
+	isPodScheduledinLocalZone func(*v1.Pod) bool
+	// stores the name of the zone that this controller belongs to
+	zone string
+
+	// nqos namespace+name is the key -> cloned state of the NQOS kapi object is the value
+	//nqosCache map[string]*networkQoSState
+	nqosCache *syncmap.SyncMap[*networkQoSState]
+
+	// queues for the CRDs where incoming work is placed to de-dup
+	nqosQueue workqueue.TypedRateLimitingInterface[string]
+	// cached access to nqos objects
+	nqosLister      networkqoslister.NetworkQoSLister
+	nqosCacheSynced cache.InformerSynced
+	// namespace queue, cache, lister
+	nqosNamespaceLister corev1listers.NamespaceLister
+	nqosNamespaceSynced cache.InformerSynced
+	nqosNamespaceQueue  workqueue.TypedRateLimitingInterface[string]
+	// pod queue, cache, lister
+	nqosPodLister corev1listers.PodLister
+	nqosPodSynced cache.InformerSynced
+	nqosPodQueue  workqueue.TypedRateLimitingInterface[string]
+	// node queue, cache, lister
+	nqosNodeLister corev1listers.NodeLister
+	nqosNodeSynced cache.InformerSynced
+	nqosNodeQueue  workqueue.TypedRateLimitingInterface[string]
+}
+
+// NewController returns a new *Controller.
+func NewController(
+	controllerName string,
+	netInfo util.NetInfo,
+	nbClient libovsdbclient.Client,
+	recorder record.EventRecorder,
+	nqosClient networkqosclientset.Interface,
+	nqosInformer networkqosinformer.NetworkQoSInformer,
+	namespaceInformer corev1informers.NamespaceInformer,
+	podInformer corev1informers.PodInformer,
+	nodeInformer corev1informers.NodeInformer,
+	addressSetFactory addressset.AddressSetFactory,
+	isPodScheduledinLocalZone func(*v1.Pod) bool,
+	zone string) (*Controller, error) {
+
+	c := &Controller{
+		controllerName:            controllerName,
+		NetInfo:                   netInfo,
+		nbClient:                  nbClient,
+		nqosClientSet:             nqosClient,
+		addressSetFactory:         addressSetFactory,
+		isPodScheduledinLocalZone: isPodScheduledinLocalZone,
+		zone:                      zone,
+		nqosCache:                 syncmap.NewSyncMap[*networkQoSState](),
+	}
+
+	klog.V(5).Infof("Setting up event handlers for Network QoS controller %s", controllerName)
+	// setup nqos informers, listers, queue
+	c.nqosLister = nqosInformer.Lister()
+	c.nqosCacheSynced = nqosInformer.Informer().HasSynced
+	c.nqosQueue = workqueue.NewTypedRateLimitingQueueWithConfig(
+		workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5),
+		workqueue.TypedRateLimitingQueueConfig[string]{Name: "networkQoS"},
+	)
+	_, err := nqosInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{
+		AddFunc:    c.onNQOSAdd,
+		UpdateFunc: c.onNQOSUpdate,
+		DeleteFunc: c.onNQOSDelete,
+	}))
+	if err != nil {
+		return nil, fmt.Errorf("could not add Event Handler for nqosInformer during network qos controller initialization, %w", err)
+	}
+
+	klog.V(5).Info("Setting up event handlers for Namespaces in Network QoS controller")
+	c.nqosNamespaceLister = namespaceInformer.Lister()
+	c.nqosNamespaceSynced = namespaceInformer.Informer().HasSynced
+	c.nqosNamespaceQueue = workqueue.NewTypedRateLimitingQueueWithConfig(
+		workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5),
+		workqueue.TypedRateLimitingQueueConfig[string]{Name: "nqosNamespaces"},
+	)
+	_, err = 
namespaceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{
+		AddFunc:    c.onNQOSNamespaceAdd,
+		UpdateFunc: c.onNQOSNamespaceUpdate,
+		DeleteFunc: c.onNQOSNamespaceDelete,
+	}))
+	if err != nil {
+		return nil, fmt.Errorf("could not add Event Handler for namespace Informer during network qos controller initialization, %w", err)
+	}
+
+	klog.V(5).Info("Setting up event handlers for Pods in Network QoS controller")
+	c.nqosPodLister = podInformer.Lister()
+	c.nqosPodSynced = podInformer.Informer().HasSynced
+	c.nqosPodQueue = workqueue.NewTypedRateLimitingQueueWithConfig(
+		workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5),
+		workqueue.TypedRateLimitingQueueConfig[string]{Name: "nqosPods"},
+	)
+	_, err = podInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{
+		AddFunc:    c.onNQOSPodAdd,
+		UpdateFunc: c.onNQOSPodUpdate,
+		DeleteFunc: c.onNQOSPodDelete,
+	}))
+	if err != nil {
+		return nil, fmt.Errorf("could not add Event Handler for pod Informer during network qos controller initialization, %w", err)
+	}
+
+	klog.V(5).Info("Setting up event handlers for Nodes in Network QoS controller")
+	c.nqosNodeLister = nodeInformer.Lister()
+	c.nqosNodeSynced = nodeInformer.Informer().HasSynced
+	c.nqosNodeQueue = workqueue.NewTypedRateLimitingQueueWithConfig(
+		workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5),
+		workqueue.TypedRateLimitingQueueConfig[string]{Name: "nqosNodes"},
+	)
+	_, err = nodeInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{
+		UpdateFunc: c.onNQOSNodeUpdate,
+	}))
+	if err != nil {
+		return nil, fmt.Errorf("could not add Event Handler for node Informer during network qos controller initialization, %w", err)
+	}
+
+	c.eventRecorder = recorder
+	return c, nil
+}
+
+// Run will not return until stopCh is closed. threadiness determines how many
+// objects (pods, namespaces, nqoses) will be handled in parallel.
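+// Each of the four work queues (nqos, namespace, pod, node) gets its own set
+// of threadiness workers; on shutdown the queues are closed and Run waits for
+// all workers to drain before returning.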
+func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + klog.Infof("Starting controller %s", c.controllerName) + + // Wait for the caches to be synced + klog.V(5).Info("Waiting for informer caches to sync") + if !util.WaitForInformerCacheSyncWithTimeout(c.controllerName, stopCh, c.nqosCacheSynced, c.nqosNamespaceSynced, c.nqosPodSynced, c.nqosNodeSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) + klog.Errorf("Error syncing caches for network qos") + return + } + + klog.Infof("Repairing Network QoSes") + // Run the repair function at startup so that we synchronize KAPI and OVNDBs + err := c.repairNetworkQoSes() + if err != nil { + klog.Errorf("Failed to repair Network QoS: %v", err) + } + + wg := &sync.WaitGroup{} + // Start the workers after the repair loop to avoid races + klog.V(5).Info("Starting Network QoS workers") + for i := 0; i < threadiness; i++ { + wg.Add(1) + go func() { + defer wg.Done() + wait.Until(func() { + c.runNQOSWorker(wg) + }, time.Second, stopCh) + }() + } + + klog.V(5).Info("Starting Namespace Network QoS workers") + for i := 0; i < threadiness; i++ { + wg.Add(1) + go func() { + defer wg.Done() + wait.Until(func() { + c.runNQOSNamespaceWorker(wg) + }, time.Second, stopCh) + }() + } + + klog.V(5).Info("Starting Pod Network QoS workers") + for i := 0; i < threadiness; i++ { + wg.Add(1) + go func() { + defer wg.Done() + wait.Until(func() { + c.runNQOSPodWorker(wg) + }, time.Second, stopCh) + }() + } + + klog.V(5).Info("Starting Node Network QoS workers") + for i := 0; i < threadiness; i++ { + wg.Add(1) + go func() { + defer wg.Done() + wait.Until(func() { + c.runNQOSNodeWorker(wg) + }, time.Second, stopCh) + }() + } + + <-stopCh + + klog.Infof("Shutting down controller %s", c.controllerName) + c.nqosQueue.ShutDown() + c.nqosNamespaceQueue.ShutDown() + c.nqosPodQueue.ShutDown() + c.teardownMetricsCollector() + wg.Wait() +} + +// worker runs a worker thread that just dequeues items, processes them, and +// marks them done. You may run as many of these in parallel as you wish; the +// workqueue guarantees that they will not end up processing the same object +// at the same time. +func (c *Controller) runNQOSWorker(wg *sync.WaitGroup) { + for c.processNextNQOSWorkItem(wg) { + } +} + +func (c *Controller) runNQOSNamespaceWorker(wg *sync.WaitGroup) { + for c.processNextNQOSNamespaceWorkItem(wg) { + } +} + +func (c *Controller) runNQOSPodWorker(wg *sync.WaitGroup) { + for c.processNextNQOSPodWorkItem(wg) { + } +} + +func (c *Controller) runNQOSNodeWorker(wg *sync.WaitGroup) { + for c.processNextNQOSNodeWorkItem(wg) { + } +} + +// handlers + +// onNQOSAdd queues the NQOS for processing. +func (c *Controller) onNQOSAdd(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + return + } + klog.V(4).Infof("Adding Network QoS %s", key) + c.nqosQueue.Add(key) +} + +// onNQOSUpdate updates the NQOS Selector in the cache and queues the NQOS for processing. 
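+// Resyncs, objects marked for deletion, and updates that do not change the
+// Spec are filtered out before the key is queued.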
+func (c *Controller) onNQOSUpdate(oldObj, newObj interface{}) { + oldNQOS := oldObj.(*networkqosapi.NetworkQoS) + newNQOS := newObj.(*networkqosapi.NetworkQoS) + + // don't process resync or objects that are marked for deletion + if oldNQOS.ResourceVersion == newNQOS.ResourceVersion || + !newNQOS.GetDeletionTimestamp().IsZero() { + return + } + if reflect.DeepEqual(oldNQOS.Spec, newNQOS.Spec) { + return + } + key, err := cache.MetaNamespaceKeyFunc(newObj) + if err == nil { + // updates to NQOS object should be very rare, once put in place they usually stay the same + klog.V(4).Infof("Updating Network QoS %s: nqosSpec %v", + key, newNQOS.Spec) + c.nqosQueue.Add(key) + } +} + +// onNQOSDelete queues the NQOS for processing. +func (c *Controller) onNQOSDelete(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + return + } + klog.V(4).Infof("Deleting Network QoS %s", key) + c.nqosQueue.Add(key) +} + +// onNQOSNamespaceAdd queues the namespace for processing. +func (c *Controller) onNQOSNamespaceAdd(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + return + } + klog.V(5).Infof("Adding Namespace in Network QoS controller %s", key) + c.nqosNamespaceQueue.Add(key) +} + +// onNQOSNamespaceUpdate queues the namespace for processing. +func (c *Controller) onNQOSNamespaceUpdate(oldObj, newObj interface{}) { + oldNamespace := oldObj.(*v1.Namespace) + newNamespace := newObj.(*v1.Namespace) + + // don't process resync or objects that are marked for deletion + if oldNamespace.ResourceVersion == newNamespace.ResourceVersion || + !newNamespace.GetDeletionTimestamp().IsZero() { + return + } + // If the labels have not changed, then there's no change that we care about: return. + oldNamespaceLabels := labels.Set(oldNamespace.Labels) + newNamespaceLabels := labels.Set(newNamespace.Labels) + if labels.Equals(oldNamespaceLabels, newNamespaceLabels) { + return + } + key, err := cache.MetaNamespaceKeyFunc(newObj) + if err == nil { + klog.V(5).Infof("Updating Namespace in Network QoS controller %s: "+ + "namespaceLabels: %v", key, newNamespaceLabels) + c.nqosNamespaceQueue.Add(key) + } +} + +// onNQOSNamespaceDelete queues the namespace for processing. +func (c *Controller) onNQOSNamespaceDelete(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + return + } + klog.V(5).Infof("Deleting Namespace in Network QoS %s", key) + c.nqosNamespaceQueue.Add(key) +} + +// onNQOSPodAdd queues the pod for processing. +func (c *Controller) onNQOSPodAdd(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + return + } + klog.V(5).Infof("Adding Pod in Network QoS controller %s", key) + c.nqosPodQueue.Add(key) +} + +// onNQOSPodUpdate queues the pod for processing. 
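+// Only label changes, pod IP changes (e.g. dualstack conversion or IPAM
+// completion), and transitions into the Completed state are queued; all other
+// pod updates are ignored.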
+func (c *Controller) onNQOSPodUpdate(oldObj, newObj interface{}) { + oldPod := oldObj.(*v1.Pod) + newPod := newObj.(*v1.Pod) + + // don't process resync or objects that are marked for deletion + if oldPod.ResourceVersion == newPod.ResourceVersion || + !newPod.GetDeletionTimestamp().IsZero() { + return + } + // We only care about pod's label changes, pod's IP changes + // pod going into completed state and pod getting scheduled and switching + // zones. Rest of the cases we may return + oldPodLabels := labels.Set(oldPod.Labels) + newPodLabels := labels.Set(newPod.Labels) + oldPodIPs, _ := util.GetPodIPsOfNetwork(oldPod, c.NetInfo) + newPodIPs, _ := util.GetPodIPsOfNetwork(newPod, c.NetInfo) + oldPodCompleted := util.PodCompleted(oldPod) + newPodCompleted := util.PodCompleted(newPod) + if labels.Equals(oldPodLabels, newPodLabels) && + // check for podIP changes (in case we allocate and deallocate) or for dualstack conversion + // it will also catch the pod update that will come when LSPAdd and IPAM allocation are done + len(oldPodIPs) == len(newPodIPs) && + oldPodCompleted == newPodCompleted { + return + } + key, err := cache.MetaNamespaceKeyFunc(newObj) + if err == nil { + klog.V(5).Infof("Updating Pod in Network QoS controller %s: "+ + "podLabels %v, podIPs: %v, PodCompleted?: %v", key, newPodLabels, + newPodIPs, newPodCompleted) + c.nqosPodQueue.Add(key) + } +} + +// onNQOSPodDelete queues the pod for processing. +func (c *Controller) onNQOSPodDelete(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + return + } + klog.V(5).Infof("Deleting Pod Network QoS %s", key) + c.nqosPodQueue.Add(key) +} + +// onNQOSNodeUpdate queues the node for processing. 
+func (c *Controller) onNQOSNodeUpdate(oldObj, newObj interface{}) { + oldNode := oldObj.(*v1.Node) + newNode := newObj.(*v1.Node) + + // don't process resync or objects that are marked for deletion + if oldNode.ResourceVersion == newNode.ResourceVersion || + !newNode.GetDeletionTimestamp().IsZero() { + return + } + // only care about node's zone name changes + if !util.NodeZoneAnnotationChanged(oldNode, newNode) { + return + } + klog.V(4).Infof("Node %s zone changed from %s to %s", newNode.Name, oldNode.Annotations[util.OvnNodeZoneName], newNode.Annotations[util.OvnNodeZoneName]) + key, err := cache.MetaNamespaceKeyFunc(newObj) + if err == nil { + c.nqosNodeQueue.Add(key) + } +} diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go new file mode 100644 index 0000000000..af96e8b77a --- /dev/null +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go @@ -0,0 +1,135 @@ +package networkqos + +import ( + "fmt" + "sync" + "time" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog/v2" +) + +func (c *Controller) processNextNQOSNamespaceWorkItem(wg *sync.WaitGroup) bool { + wg.Add(1) + defer wg.Done() + nqosNSKey, quit := c.nqosNamespaceQueue.Get() + if quit { + return false + } + defer c.nqosNamespaceQueue.Done(nqosNSKey) + + err := c.syncNetworkQoSNamespace(nqosNSKey) + if err == nil { + c.nqosNamespaceQueue.Forget(nqosNSKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", nqosNSKey, err)) + + if c.nqosNamespaceQueue.NumRequeues(nqosNSKey) < maxRetries { + c.nqosNamespaceQueue.AddRateLimited(nqosNSKey) + return true + } + + c.nqosNamespaceQueue.Forget(nqosNSKey) + return true +} + +// syncNetworkQoSNamespace decides the main logic everytime +// we dequeue a key from the nqosNamespaceQueue cache +func (c *Controller) syncNetworkQoSNamespace(key string) error { + startTime := time.Now() + klog.V(5).Infof("Processing sync for Namespace %s in Network QoS controller", key) + defer func() { + klog.V(5).Infof("Finished syncing Namespace %s Network QoS controller: took %v", key, time.Since(startTime)) + }() + namespace, err := c.nqosNamespaceLister.Get(key) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + // (i) namespace add + // (ii) namespace update because namespace's labels changed + // (iii) namespace delete + // case (iii) + if namespace == nil { + for _, cachedKey := range c.nqosCache.GetKeys() { + err := c.nqosCache.DoWithLock(cachedKey, func(nqosKey string) error { + if nqosObj, _ := c.nqosCache.Load(nqosKey); nqosObj != nil { + return c.clearNamespaceForNQOS(key, nqosObj) + } + return nil + }) + if err != nil { + return err + } + } + recordNamespaceReconcileDuration(c.controllerName, time.Since(startTime).Milliseconds()) + return nil + } + // case (i)/(ii) + for _, cachedKey := range c.nqosCache.GetKeys() { + err := c.nqosCache.DoWithLock(cachedKey, func(nqosKey string) error { + if nqosObj, _ := c.nqosCache.Load(nqosKey); nqosObj != nil { + return c.setNamespaceForNQOS(namespace, nqosObj) + } else { + klog.Warningf("NetworkQoS not synced yet: %s", nqosKey) + // requeue nqos key to sync it + c.nqosQueue.Add(nqosKey) + // requeue namespace key 3 seconds later, allow NetworkQoS to be handled + c.nqosNamespaceQueue.AddAfter(key, 3*time.Second) + return nil + } + }) + if err != nil { + return err + } + } + 
recordNamespaceReconcileDuration(c.controllerName, time.Since(startTime).Milliseconds()) + return nil +} + +// clearNamespaceForNQOS will handle the logic for figuring out if the provided namespace name +// has pods that affect address sets of the cached network qoses. If so, remove them. +func (c *Controller) clearNamespaceForNQOS(namespace string, nqosState *networkQoSState) error { + for _, rule := range nqosState.EgressRules { + if rule.Classifier == nil { + continue + } + for _, dest := range rule.Classifier.Destinations { + if err := dest.removePodsInNamespace(namespace); err != nil { + return fmt.Errorf("error removing IPs from dest address set %s: %v", dest.DestAddrSet.GetName(), err) + } + } + } + return nil +} + +// setNamespaceForNQOS will handle the logic for figuring out if the provided namespace name +// has pods that need to populate or removed from the address sets of the network qoses. +func (c *Controller) setNamespaceForNQOS(namespace *v1.Namespace, nqosState *networkQoSState) error { + for _, rule := range nqosState.EgressRules { + if rule.Classifier == nil { + continue + } + for index, dest := range rule.Classifier.Destinations { + if dest.PodSelector == nil && dest.NamespaceSelector == nil { + // no selectors, no address set + continue + } + if !dest.matchNamespace(namespace, nqosState.namespace) { + if err := dest.removePodsInNamespace(namespace.Name); err != nil { + return fmt.Errorf("error removing pods in namespace %s from NetworkQoS %s/%s rule %d: %v", namespace.Name, nqosState.namespace, nqosState.name, index, err) + } + continue + } + // add matching pods in the namespace to dest + if err := dest.addPodsInNamespace(c, namespace.Name); err != nil { + return err + } + klog.V(5).Infof("Added pods in namespace %s for NetworkQoS %s/%s rule %d", namespace.Name, nqosState.namespace, nqosState.name, index) + } + } + return nil +} diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go new file mode 100644 index 0000000000..9e22a2831c --- /dev/null +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go @@ -0,0 +1,160 @@ +package networkqos + +import ( + "fmt" + "sync" + "time" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" +) + +func (c *Controller) processNextNQOSNodeWorkItem(wg *sync.WaitGroup) bool { + wg.Add(1) + defer wg.Done() + nqosNodeKey, quit := c.nqosNodeQueue.Get() + if quit { + return false + } + defer c.nqosNodeQueue.Done(nqosNodeKey) + err := c.syncNetworkQoSNode(nqosNodeKey) + if err == nil { + c.nqosNodeQueue.Forget(nqosNodeKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", nqosNodeKey, err)) + + if c.nqosNodeQueue.NumRequeues(nqosNodeKey) < maxRetries { + c.nqosNodeQueue.AddRateLimited(nqosNodeKey) + return true + } + + c.nqosNodeQueue.Forget(nqosNodeKey) + return true +} + +// syncNetworkQoSNode decides the main logic everytime +// we dequeue a key from the nqosNodeQueue cache +func (c *Controller) syncNetworkQoSNode(key string) error { + startTime := time.Now() + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + klog.V(5).Infof("Processing sync for Node %s in Network QoS 
controller", name) + + defer func() { + klog.V(5).Infof("Finished syncing Node %s Network QoS controller: took %v", name, time.Since(startTime)) + }() + node, err := c.nqosNodeLister.Get(name) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + + if !c.isNodeInLocalZone(node) && c.TopologyType() == types.Layer3Topology { + // clean up qos/address set for the node + return c.cleanupQoSFromNode(node.Name) + } + // configure qos for pods on the node + pods, err := c.getPodsByNode(node.Name) + if err != nil { + return err + } + switchName := c.getLogicalSwitchName(node.Name) + _, err = c.findLogicalSwitch(switchName) + if err != nil { + klog.V(4).Infof("Failed to look up logical switch %s: %v", switchName, err) + return err + } + for _, cachedKey := range c.nqosCache.GetKeys() { + err := c.nqosCache.DoWithLock(cachedKey, func(nqosKey string) error { + nqosObj, _ := c.nqosCache.Load(nqosKey) + if nqosObj == nil { + klog.Warningf("NetworkQoS not synced yet: %s", nqosKey) + // requeue nqos key to sync it + c.nqosQueue.Add(nqosKey) + // requeue namespace key 3 seconds later, allow NetworkQoS to be handled + c.nqosNamespaceQueue.AddAfter(key, 3*time.Second) + return nil + } + for _, pod := range pods { + ns, err := c.nqosNamespaceLister.Get(pod.Namespace) + if err != nil { + return fmt.Errorf("failed to look up namespace %s: %w", pod.Namespace, err) + } + if err = c.setPodForNQOS(pod, nqosObj, ns); err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + } + return nil +} + +func (c *Controller) getPodsByNode(nodeName string) ([]*v1.Pod, error) { + pods, err := c.nqosPodLister.List(labels.Everything()) + if err != nil { + return nil, fmt.Errorf("failed to list pods: %w", err) + } + podsByNode := []*v1.Pod{} + for _, pod := range pods { + if util.PodScheduled(pod) && !util.PodWantsHostNetwork(pod) && pod.Spec.NodeName == nodeName { + podsByNode = append(podsByNode, pod) + } + } + return podsByNode, nil +} + +// isNodeInLocalZone returns whether the provided node is in a zone local to the zone controller +func (c *Controller) isNodeInLocalZone(node *v1.Node) bool { + return util.GetNodeZone(node) == c.zone +} + +func (c *Controller) cleanupQoSFromNode(nodeName string) error { + switchName := c.getLogicalSwitchName(nodeName) + for _, cachedKey := range c.nqosCache.GetKeys() { + err := c.nqosCache.DoWithLock(cachedKey, func(nqosKey string) error { + nqosObj, _ := c.nqosCache.Load(nqosKey) + if nqosObj == nil { + klog.V(4).Infof("Expected networkqos %s not found in cache", nqosKey) + return nil + } + pods := []string{} + if val, _ := nqosObj.SwitchRefs.Load(switchName); val != nil { + pods = val.([]string) + } + for _, pod := range pods { + addrs, _ := nqosObj.Pods.Load(pod) + if addrs != nil { + err := nqosObj.SrcAddrSet.DeleteAddresses(addrs.([]string)) + if err != nil { + return err + } + } + nqosObj.Pods.Delete(pod) + } + err := c.removeQoSFromLogicalSwitches(nqosObj, []string{switchName}) + if err != nil { + return err + } + nqosObj.SwitchRefs.Delete(switchName) + return nil + }) + if err != nil { + return err + } + klog.V(4).Infof("Successfully cleaned up qos rules %s from %s", cachedKey, switchName) + } + return nil +} diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go new file mode 100644 index 0000000000..df35a992ec --- /dev/null +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go @@ -0,0 +1,280 @@ +package networkqos + +import ( + 
"errors" + "fmt" + "slices" + "strconv" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +func (c *Controller) findLogicalSwitch(switchName string) (*nbdb.LogicalSwitch, error) { + if lsws, err := libovsdbops.FindLogicalSwitchesWithPredicate(c.nbClient, func(item *nbdb.LogicalSwitch) bool { + return item.Name == switchName + }); err != nil { + return nil, fmt.Errorf("failed to look up logical switch %s: %w", switchName, err) + } else if len(lsws) > 0 { + return lsws[0], nil + } + return nil, fmt.Errorf("logical switch %s not found", switchName) +} + +func (c *Controller) addQoSToLogicalSwitch(qosState *networkQoSState, switchName string) error { + // find lsw + lsw, err := c.findLogicalSwitch(switchName) + if err != nil { + return err + } + // construct qoses + qoses := []*nbdb.QoS{} + ipv4Enabled, ipv6Enabled := c.IPMode() + for index, rule := range qosState.EgressRules { + dbIDs := qosState.getDbObjectIDs(c.controllerName, index) + qos := &nbdb.QoS{ + Action: map[string]int{}, + Bandwidth: map[string]int{}, + Direction: nbdb.QoSDirectionToLport, + ExternalIDs: dbIDs.GetExternalIDs(), + Match: generateNetworkQoSMatch(qosState, rule, ipv4Enabled, ipv6Enabled), + Priority: rule.Priority, + } + if c.IsSecondary() { + qos.ExternalIDs[types.NetworkExternalID] = c.GetNetworkName() + } + if rule.Dscp >= 0 { + qos.Action[nbdb.QoSActionDSCP] = rule.Dscp + } + if rule.Rate != nil && *rule.Rate > 0 { + qos.Bandwidth[nbdb.QoSBandwidthRate] = *rule.Rate + } + if rule.Burst != nil && *rule.Burst > 0 { + qos.Bandwidth[nbdb.QoSBandwidthBurst] = *rule.Burst + } + qoses = append(qoses, qos) + } + ops := []libovsdb.Operation{} + ops, err = libovsdbops.CreateOrUpdateQoSesOps(c.nbClient, ops, qoses...) + if err != nil { + return fmt.Errorf("failed to create QoS operations for %s/%s: %w", qosState.namespace, qosState.name, err) + } + // identify qoses need binding to lsw + newQoSes := []*nbdb.QoS{} + for _, qos := range qoses { + if slices.Contains(lsw.QOSRules, qos.UUID) { + continue + } + newQoSes = append(newQoSes, qos) + } + if len(newQoSes) > 0 { + ops, err = libovsdbops.AddQoSesToLogicalSwitchOps(c.nbClient, ops, switchName, newQoSes...) 
+ if err != nil { + return fmt.Errorf("failed to create operations to add QoS to switch %s: %w", switchName, err) + } + } + if _, err := libovsdbops.TransactAndCheck(c.nbClient, ops); err != nil { + return fmt.Errorf("failed to execute ops to add QoSes to switch %s, err: %w", switchName, err) + } + return nil +} + +// remove qos from a list of logical switches +func (c *Controller) removeQoSFromLogicalSwitches(qosState *networkQoSState, switchNames []string) error { + qoses, err := libovsdbops.FindQoSesWithPredicate(c.nbClient, func(qos *nbdb.QoS) bool { + return qos.ExternalIDs[libovsdbops.OwnerControllerKey.String()] == c.controllerName && + qos.ExternalIDs[libovsdbops.OwnerTypeKey.String()] == string(libovsdbops.NetworkQoSOwnerType) && + qos.ExternalIDs[libovsdbops.ObjectNameKey.String()] == qosState.getObjectNameKey() + }) + if err != nil { + return fmt.Errorf("failed to look up QoSes for %s/%s: %v", qosState.namespace, qosState.name, err) + } + unbindQoSOps := []libovsdb.Operation{} + // remove qos rules from logical switches + for _, lsName := range switchNames { + ops, err := libovsdbops.RemoveQoSesFromLogicalSwitchOps(c.nbClient, nil, lsName, qoses...) + if err != nil { + return fmt.Errorf("failed to get ops to remove QoSes from switches %s for NetworkQoS %s/%s: %w", lsName, qosState.namespace, qosState.name, err) + } + unbindQoSOps = append(unbindQoSOps, ops...) + } + if _, err := libovsdbops.TransactAndCheck(c.nbClient, unbindQoSOps); err != nil { + return fmt.Errorf("failed to execute ops to remove QoSes from logical switches, err: %w", err) + } + return nil +} + +func (c *Controller) cleanupStaleOvnObjects(qosState *networkQoSState) error { + // find existing QoSes owned by NetworkQoS + existingQoSes, err := libovsdbops.FindQoSesWithPredicate(c.nbClient, func(qos *nbdb.QoS) bool { + return qos.ExternalIDs[libovsdbops.OwnerControllerKey.String()] == c.controllerName && + qos.ExternalIDs[libovsdbops.OwnerTypeKey.String()] == string(libovsdbops.NetworkQoSOwnerType) && + qos.ExternalIDs[libovsdbops.ObjectNameKey.String()] == qosState.getObjectNameKey() + }) + if err != nil { + return fmt.Errorf("error looking up existing QoSes for %s/%s: %v", qosState.namespace, qosState.name, err) + } + staleSwitchQoSMap := map[string][]*nbdb.QoS{} + totalNumOfRules := len(qosState.EgressRules) + for _, qos := range existingQoSes { + index := qos.ExternalIDs[libovsdbops.RuleIndex.String()] + numIndex, convError := strconv.Atoi(index) + indexWithinRange := false + if index != "" && convError == nil && numIndex < totalNumOfRules { + // rule index is valid + indexWithinRange = true + } + // qos is considered stale since the index is out of range + // get switches that reference to the stale qos + switches, err := libovsdbops.FindLogicalSwitchesWithPredicate(c.nbClient, func(ls *nbdb.LogicalSwitch) bool { + return util.SliceHasStringItem(ls.QOSRules, qos.UUID) + }) + if err != nil { + if !errors.Is(err, libovsdbclient.ErrNotFound) { + return fmt.Errorf("error looking up logical switches by qos: %w", err) + } + continue + } + // build map of switch->list(qos) + for _, ls := range switches { + if _, qosInUse := qosState.SwitchRefs.Load(ls.Name); indexWithinRange && qosInUse { + continue + } + qosList := staleSwitchQoSMap[ls.UUID] + if qosList == nil { + qosList = []*nbdb.QoS{} + } + qosList = append(qosList, qos) + staleSwitchQoSMap[ls.Name] = qosList + } + } + allOps, err := c.findStaleAddressSets(qosState) + if err != nil { + return fmt.Errorf("failed to get ops to delete stale address sets for 
NetworkQoS %s/%s: %w", qosState.namespace, qosState.name, err) + } + // remove stale qos rules from logical switches + for lsName, qoses := range staleSwitchQoSMap { + var switchOps []libovsdb.Operation + switchOps, err = libovsdbops.RemoveQoSesFromLogicalSwitchOps(c.nbClient, switchOps, lsName, qoses...) + if err != nil { + return fmt.Errorf("failed to get ops to remove stale QoSes from switches %s for NetworkQoS %s/%s: %w", lsName, qosState.namespace, qosState.name, err) + } + allOps = append(allOps, switchOps...) + } + // commit allOps + if _, err := libovsdbops.TransactAndCheck(c.nbClient, allOps); err != nil { + return fmt.Errorf("failed to execute ops to clean up stale QoSes, err: %w", err) + } + return nil +} + +// delete ovn QoSes generated from network qos +func (c *Controller) deleteByName(ovnObjectName string) error { + qoses, err := libovsdbops.FindQoSesWithPredicate(c.nbClient, func(qos *nbdb.QoS) bool { + return qos.ExternalIDs[libovsdbops.OwnerControllerKey.String()] == c.controllerName && + qos.ExternalIDs[libovsdbops.OwnerTypeKey.String()] == string(libovsdbops.NetworkQoSOwnerType) && + qos.ExternalIDs[libovsdbops.ObjectNameKey.String()] == ovnObjectName + }) + if err != nil { + return fmt.Errorf("failed to look up QoSes by name %s: %v", ovnObjectName, err) + } + if err = c.deleteOvnQoSes(qoses); err != nil { + return fmt.Errorf("error cleaning up OVN QoSes for %s: %v", ovnObjectName, err) + } + // remove address sets + if err = c.deleteAddressSet(ovnObjectName); err != nil { + return fmt.Errorf("error cleaning up address sets for %s: %w", ovnObjectName, err) + } + return nil +} + +// delete a list of ovn QoSes +func (c *Controller) deleteOvnQoSes(qoses []*nbdb.QoS) error { + switchQoSMap := map[string][]*nbdb.QoS{} + for _, qos := range qoses { + switches, err := libovsdbops.FindLogicalSwitchesWithPredicate(c.nbClient, func(ls *nbdb.LogicalSwitch) bool { + return util.SliceHasStringItem(ls.QOSRules, qos.UUID) + }) + if err != nil { + if !errors.Is(err, libovsdbclient.ErrNotFound) { + return fmt.Errorf("failed to look up logical switches by qos: %w", err) + } + continue + } + // get switches that reference to the stale qoses + for _, ls := range switches { + qosList := switchQoSMap[ls.Name] + if qosList == nil { + qosList = []*nbdb.QoS{} + } + qosList = append(qosList, qos) + switchQoSMap[ls.Name] = qosList + } + } + unbindQoSOps := []libovsdb.Operation{} + // remove qos rules from logical switches + for lsName, qoses := range switchQoSMap { + ops, err := libovsdbops.RemoveQoSesFromLogicalSwitchOps(c.nbClient, nil, lsName, qoses...) + if err != nil { + return fmt.Errorf("failed to get ops to remove QoSes from switch %s: %w", lsName, err) + } + unbindQoSOps = append(unbindQoSOps, ops...) + } + if _, err := libovsdbops.TransactAndCheck(c.nbClient, unbindQoSOps); err != nil { + return fmt.Errorf("failed to execute ops to remove QoSes from logical switches, err: %w", err) + } + // delete qos + delQoSOps, err := libovsdbops.DeleteQoSesOps(c.nbClient, nil, qoses...) 
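+	// The QoS rows are deleted only after the unbind transaction above has
+	// removed every logical-switch reference, so no switch is left pointing
+	// at a dangling QoS UUID.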
+ if err != nil { + return fmt.Errorf("failed to get ops to delete QoSes: %w", err) + } + if _, err := libovsdbops.TransactAndCheck(c.nbClient, delQoSOps); err != nil { + return fmt.Errorf("failed to execute ops to delete QoSes, err: %w", err) + } + return nil +} + +func (c *Controller) deleteAddressSet(qosName string) error { + // find address sets by networkqos name & controller name + delAddrSetOps, err := libovsdbops.DeleteAddressSetsWithPredicateOps(c.nbClient, nil, func(item *nbdb.AddressSet) bool { + return item.ExternalIDs[libovsdbops.OwnerControllerKey.String()] == c.controllerName && + item.ExternalIDs[libovsdbops.OwnerTypeKey.String()] == string(libovsdbops.NetworkQoSOwnerType) && + item.ExternalIDs[libovsdbops.ObjectNameKey.String()] == qosName + }) + if err != nil { + return fmt.Errorf("failed to get ops to delete address sets: %w", err) + } + if _, err := libovsdbops.TransactAndCheck(c.nbClient, delAddrSetOps); err != nil { + return fmt.Errorf("failed to execute ops to delete address sets, err: %w", err) + } + return nil +} + +// find stale address sets +// 1. find address sets owned by NetworkQoS +// 2. get address sets in use +// 3. compare and identify those not in use +func (c *Controller) findStaleAddressSets(qosState *networkQoSState) ([]libovsdb.Operation, error) { + staleAddressSets := []*nbdb.AddressSet{} + addrsets, err := libovsdbops.FindAddressSetsWithPredicate(c.nbClient, func(item *nbdb.AddressSet) bool { + return item.ExternalIDs[libovsdbops.OwnerControllerKey.String()] == c.controllerName && + item.ExternalIDs[libovsdbops.OwnerTypeKey.String()] == string(libovsdbops.NetworkQoSOwnerType) && + item.ExternalIDs[libovsdbops.ObjectNameKey.String()] == qosState.getObjectNameKey() + }) + if err != nil { + return nil, fmt.Errorf("failed to look up address sets: %w", err) + } + addrsetInUse := qosState.getAddressSetHashNames() + for _, addrset := range addrsets { + addrsetName := addrset.GetName() + if !slices.Contains(addrsetInUse, addrsetName) { + staleAddressSets = append(staleAddressSets, addrset) + } + } + return libovsdbops.DeleteAddressSetsOps(c.nbClient, nil, staleAddressSets...) 
+}
diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go
new file mode 100644
index 0000000000..88ac75c4dd
--- /dev/null
+++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go
@@ -0,0 +1,198 @@
+package networkqos
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog/v2"
+
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util"
+)
+
+func (c *Controller) processNextNQOSPodWorkItem(wg *sync.WaitGroup) bool {
+	wg.Add(1)
+	defer wg.Done()
+	nqosPodKey, quit := c.nqosPodQueue.Get()
+	if quit {
+		return false
+	}
+	defer c.nqosPodQueue.Done(nqosPodKey)
+	err := c.syncNetworkQoSPod(nqosPodKey)
+	if err == nil {
+		c.nqosPodQueue.Forget(nqosPodKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with: %v", nqosPodKey, err))
+
+	if c.nqosPodQueue.NumRequeues(nqosPodKey) < maxRetries {
+		c.nqosPodQueue.AddRateLimited(nqosPodKey)
+		return true
+	}
+
+	c.nqosPodQueue.Forget(nqosPodKey)
+	return true
+}
+
+// syncNetworkQoSPod decides the main logic every time
+// we dequeue a key from the nqosPodQueue cache
+func (c *Controller) syncNetworkQoSPod(key string) error {
+	startTime := time.Now()
+	// Iterate over all NQOSes and check if this pod starts/stops matching
+	// any of them, adding/removing the setup accordingly. A pod can match
+	// multiple NQOS objects, so continue iterating over all of them before finishing.
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		return err
+	}
+	klog.V(5).Infof("Processing sync for Pod %s/%s in Network QoS controller", namespace, name)
+
+	defer func() {
+		klog.V(5).Infof("Finished syncing Pod %s/%s Network QoS controller: took %v", namespace, name, time.Since(startTime))
+	}()
+	ns, err := c.nqosNamespaceLister.Get(namespace)
+	if err != nil {
+		return err
+	}
+	podNamespaceLister := c.nqosPodLister.Pods(namespace)
+	pod, err := podNamespaceLister.Get(name)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return err
+	}
+	// (i) pod add
+	// (ii) pod update because LSP and IPAM is done OR pod's labels changed
+	// (iii) pod update because pod went into completed state
+	// (iv) pod delete
+	// case (iii)/(iv)
+	if pod == nil || util.PodCompleted(pod) {
+		for _, cachedKey := range c.nqosCache.GetKeys() {
+			err := c.nqosCache.DoWithLock(cachedKey, func(nqosKey string) error {
+				if nqosObj, _ := c.nqosCache.Load(nqosKey); nqosObj != nil {
+					return c.clearPodForNQOS(namespace, name, nqosObj)
+				}
+				return nil
+			})
+			if err != nil {
+				return err
+			}
+		}
+		recordPodReconcileDuration(c.controllerName, time.Since(startTime).Milliseconds())
+		return nil
+	}
+	// We don't want to short-circuit only local zone pods here, since peer pods,
+	// whether local or remote, need to be dealt with. 
So we let the main + // NQOS controller take care of the local zone pods logic for the policy subjects + if !util.PodScheduled(pod) || util.PodWantsHostNetwork(pod) { + // we don't support NQOS with host-networked pods + // if pod is no scheduled yet, return and we can process it on its next update + // because anyways at that stage pod is considered to belong to remote zone + return nil + } + // case (i)/(ii) + for _, cachedKey := range c.nqosCache.GetKeys() { + err := c.nqosCache.DoWithLock(cachedKey, func(nqosKey string) error { + if nqosObj, _ := c.nqosCache.Load(nqosKey); nqosObj != nil { + return c.setPodForNQOS(pod, nqosObj, ns) + } else { + klog.Warningf("NetworkQoS not synced yet: %s", nqosKey) + // requeue nqos key to sync it + c.nqosQueue.Add(nqosKey) + // requeue pod key in 3 sec + c.nqosPodQueue.AddAfter(key, 3*time.Second) + } + return nil + }) + if err != nil { + return err + } + } + recordPodReconcileDuration(c.controllerName, time.Since(startTime).Milliseconds()) + return nil +} + +// clearPodForNQOS will handle the logic for figuring out if the provided pod name +func (c *Controller) clearPodForNQOS(namespace, name string, nqosState *networkQoSState) error { + fullPodName := joinMetaNamespaceAndName(namespace, name) + if err := nqosState.removePodFromSource(c, fullPodName, nil); err != nil { + return err + } + // remove pod from destination address set + for _, rule := range nqosState.EgressRules { + if rule.Classifier == nil { + continue + } + for _, dest := range rule.Classifier.Destinations { + if dest.PodSelector == nil && dest.NamespaceSelector == nil { + continue + } + if err := dest.removePod(fullPodName, nil); err != nil { + return err + } + } + } + return nil +} + +// setPodForNQOS wil lcheck if the pod meets source selector or dest selector +// - match source: add the ip to source address set, bind qos rule to the switch +// - match dest: add the ip to the destination address set +func (c *Controller) setPodForNQOS(pod *v1.Pod, nqosState *networkQoSState, namespace *v1.Namespace) error { + addresses, err := getPodAddresses(pod, c.NetInfo) + if err == nil && len(addresses) == 0 { + // pod hasn't been annotated with addresses yet, return without retry + klog.V(6).Infof("Pod %s/%s doesn't have addresses on network %s, skip NetworkQoS processing", pod.Namespace, pod.Name, c.GetNetworkName()) + return nil + } else if err != nil { + return fmt.Errorf("failed to parse addresses for pod %s/%s, network %s, err: %v", pod.Namespace, pod.Name, c.GetNetworkName(), err) + } + fullPodName := joinMetaNamespaceAndName(pod.Namespace, pod.Name) + // is pod in this zone + if c.isPodScheduledinLocalZone(pod) { + if matchSource := nqosState.matchSourceSelector(pod); matchSource { + // pod's labels match source selector + err = nqosState.configureSourcePod(c, pod, addresses) + } else { + // pod's labels don't match selector, but it probably matched previously + err = nqosState.removePodFromSource(c, fullPodName, addresses) + } + if err != nil { + return err + } + } else { + klog.V(4).Infof("Pod %s is not scheduled in local zone, call remove to ensure it's not in source", fullPodName) + err = nqosState.removePodFromSource(c, fullPodName, addresses) + if err != nil { + return err + } + } + return reconcilePodForDestinations(nqosState, namespace, pod, addresses) +} + +func reconcilePodForDestinations(nqosState *networkQoSState, podNs *v1.Namespace, pod *v1.Pod, addresses []string) error { + fullPodName := joinMetaNamespaceAndName(pod.Namespace, pod.Name) + for _, rule := range 
nqosState.EgressRules { + for index, dest := range rule.Classifier.Destinations { + if dest.PodSelector == nil && dest.NamespaceSelector == nil { + continue + } + if dest.matchPod(podNs, pod, nqosState.namespace) { + // add pod address to address set + if err := dest.addPod(pod.Namespace, pod.Name, addresses); err != nil { + return fmt.Errorf("failed to add addresses {%s} to dest address set %s for NetworkQoS %s/%s, rule index %d: %v", strings.Join(addresses, ","), dest.DestAddrSet.GetName(), nqosState.namespace, nqosState.name, index, err) + } + } else { + // no match, remove the pod if it's previously selected + if err := dest.removePod(fullPodName, addresses); err != nil { + return fmt.Errorf("failed to delete addresses {%s} from dest address set %s for NetworkQoS %s/%s, rule index %d: %v", strings.Join(addresses, ","), dest.DestAddrSet.GetName(), nqosState.namespace, nqosState.name, index, err) + } + } + } + } + return nil +} diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go new file mode 100644 index 0000000000..00cb68aab2 --- /dev/null +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go @@ -0,0 +1,916 @@ +package networkqos + +import ( + "context" + "fmt" + "slices" + "strconv" + "sync" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + nqostype "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + fakenqosclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" + ovnk8stesting "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" + libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +func init() { + config.IPv4Mode = true + config.IPv6Mode = false + config.OVNKubernetesFeature.EnableNetworkQoS = true + config.OVNKubernetesFeature.EnableMultiNetwork = false + config.OVNKubernetesFeature.EnableInterconnect = false // set via tableEntrySetup +} + +var ( + defaultControllerName = "default-network-controller" + streamControllerName = "stream-network-controller" + watchFactory *factory.WatchFactory + stopChan chan (struct{}) + nbClient libovsdbclient.Client + nbsbCleanup *libovsdbtest.Context + fakeKubeClient *fake.Clientset + fakeNQoSClient *fakenqosclient.Clientset + wg sync.WaitGroup + defaultAddrsetFactory addressset.AddressSetFactory + streamAddrsetFactory addressset.AddressSetFactory + + nqosNamespace = "network-qos-test" + nqosName = "my-network-qos" + 
clientPodName = "client-pod" + + app1Namespace = "app1-ns" + app3Namespace = "app3-ns" +) + +func TestNetworkQoS(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "NetworkQoS Controller") +} + +func tableEntrySetup(enableInterconnect bool) { + config.OVNKubernetesFeature.EnableInterconnect = enableInterconnect + + ns0 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: nqosNamespace, + Labels: map[string]string{ + "app": "client", + }, + }, + } + ns1 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: app1Namespace, + Labels: map[string]string{ + "app": "app1", + }, + }, + } + ns3 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: app3Namespace, + Labels: map[string]string{ + "app": "app3", + }, + }, + } + nqos := &nqostype.NetworkQoS{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: nqosNamespace, + Name: nqosName, + }, + Spec: nqostype.Spec{ + Priority: 100, + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "client", + }, + }, + Egress: []nqostype.Rule{ + { + DSCP: 50, + Bandwidth: nqostype.Bandwidth{ + Rate: 10000, + Burst: 100000, + }, + Classifier: nqostype.Classifier{ + To: []nqostype.Destination{ + { + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "component": "service1", + }, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "app1", + }, + }, + }, + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "128.116.0.0/17", + Except: []string{ + "128.116.0.0", + "128.116.0.255", + }, + }, + }, + }, + Port: nqostype.Port{ + Protocol: "tcp", + Port: 8080, + }, + }, + }, + { + DSCP: 51, + Classifier: nqostype.Classifier{ + To: []nqostype.Destination{ + { + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "component": "service3", + }, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "app3", + }, + }, + }, + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "128.118.0.0/17", + Except: []string{ + "128.118.0.0", + "128.118.0.255", + }, + }, + }, + }, + Port: nqostype.Port{ + Protocol: "udp", + Port: 9090, + }, + }, + }, + }, + }, + } + + node1 := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + Annotations: map[string]string{ + "k8s.ovn.org/zone-name": "node1", + }, + }, + } + + node2 := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + Annotations: map[string]string{ + "k8s.ovn.org/zone-name": "node2", + }, + }, + } + + clientPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: nqosNamespace, + Name: clientPodName, + Labels: map[string]string{ + "app": "client", + }, + Annotations: map[string]string{ + "k8s.ovn.org/pod-networks": `{"default/stream": {"ip_addresses":["10.128.2.3/26"],"mac_address":"0a:58:0a:80:02:03"}, "default":{"ip_addresses":["10.192.177.4/26"],"mac_address":"0a:58:0a:c0:b1:04","gateway_ips":["10.192.177.1"],"routes":[{"dest":"10.192.0.0/16","nextHop":"10.192.177.1"},{"dest":"10.223.0.0/16","nextHop":"10.192.177.1"},{"dest":"100.64.0.0/16","nextHop":"10.192.177.1"}],"mtu":"1500","ip_address":"10.192.177.4/26","gateway_ip":"10.192.177.1"}}`, + "k8s.v1.cni.cncf.io/networks": `[{"interface":"net1","name":"stream","namespace":"default"}]`, + }, + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + NodeName: "node1", + }, + } + + initialDB := &libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + &nbdb.LogicalSwitch{ + Name: "node1", + }, + &nbdb.LogicalSwitch{ + Name: "node2", + }, + &nbdb.LogicalSwitch{ + Name: "stream_node1", + }, + }, + } 
+ + initEnv([]runtime.Object{ns0, ns1, ns3, node1, node2, clientPod}, []runtime.Object{nqos}, initialDB) + // init controller for default network + initNetworkQoSController(&util.DefaultNetInfo{}, defaultAddrsetFactory, defaultControllerName) + // init controller for stream nad + nad := ovnk8stesting.GenerateNAD("stream", "stream", "default", types.Layer3Topology, "10.128.2.0/16/24", types.NetworkRoleSecondary) + streamImmutableNadInfo, err := util.ParseNADInfo(nad) + Expect(err).NotTo(HaveOccurred()) + streamNadInfo := util.NewMutableNetInfo(streamImmutableNadInfo) + streamNadInfo.AddNADs("default/stream") + initNetworkQoSController(streamNadInfo, streamAddrsetFactory, streamControllerName) +} + +var _ = AfterEach(func() { + shutdownController() + if nbsbCleanup != nil { + nbsbCleanup.Cleanup() + nbsbCleanup = nil + } +}) + +var _ = Describe("NetworkQoS Controller", func() { + + var _ = Context("With different interconnect configurations", func() { + + DescribeTable("When starting controller with NetworkQoS, Pod and Node objects", + func(enableInterconnect bool) { + tableEntrySetup(enableInterconnect) + + By("creates address sets for source and destination pod selectors") + { + eventuallyExpectAddressSet(defaultAddrsetFactory, nqosNamespace, nqosName, "src", "0", defaultControllerName) + eventuallyExpectAddressSet(defaultAddrsetFactory, nqosNamespace, nqosName, "0", "0", defaultControllerName) + eventuallyExpectAddressSet(defaultAddrsetFactory, nqosNamespace, nqosName, "1", "0", defaultControllerName) + } + + By("creates QoS rules in ovn nb") + { + qos0 := eventuallyExpectQoS(defaultControllerName, nqosNamespace, nqosName, 0) + qos1 := eventuallyExpectQoS(defaultControllerName, nqosNamespace, nqosName, 1) + eventuallySwitchHasQoS("node1", qos0) + eventuallySwitchHasQoS("node1", qos1) + eventuallyAddressSetHas(defaultAddrsetFactory, nqosNamespace, nqosName, "src", "0", defaultControllerName, "10.192.177.4") + sourceAddrSet, err := findAddressSet(defaultAddrsetFactory, nqosNamespace, nqosName, "src", "0", defaultControllerName) + Expect(err).NotTo(HaveOccurred()) + dst1AddrSet, err1 := findAddressSet(defaultAddrsetFactory, nqosNamespace, nqosName, "0", "0", defaultControllerName) + Expect(err1).NotTo(HaveOccurred()) + srcHashName4, _ := sourceAddrSet.GetASHashNames() + dst1HashName4, _ := dst1AddrSet.GetASHashNames() + Expect(qos0.Match).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || (ip4.dst == 128.116.0.0/17 && ip4.dst != {128.116.0.0,128.116.0.255})) && tcp && tcp.dst == 8080", srcHashName4, dst1HashName4))) + Expect(qos0.Action).To(ContainElement(50)) + Expect(qos0.Priority).To(Equal(11000)) + Expect(qos0.Bandwidth).To(ContainElements(10000, 100000)) + dst3AddrSet, err3 := findAddressSet(defaultAddrsetFactory, nqosNamespace, nqosName, "1", "0", defaultControllerName) + Expect(err3).NotTo(HaveOccurred()) + dst3HashName4, _ := dst3AddrSet.GetASHashNames() + Expect(qos1.Match).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || (ip4.dst == 128.118.0.0/17 && ip4.dst != {128.118.0.0,128.118.0.255})) && udp && udp.dst == 9090", srcHashName4, dst3HashName4))) + } + + app1Pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: app1Namespace, + Name: "app1-pod", + Labels: map[string]string{ + "component": "service1", + }, + Annotations: map[string]string{ + "k8s.ovn.org/pod-networks": 
`{"default":{"ip_addresses":["10.194.188.4/26"],"mac_address":"0a:58:0a:c2:bc:04","gateway_ips":["10.194.188.1"],"routes":[{"dest":"10.194.0.0/16","nextHop":"10.194.188.1"},{"dest":"10.223.0.0/16","nextHop":"10.194.188.1"},{"dest":"100.64.0.0/16","nextHop":"10.194.188.1"}],"mtu":"1500","ip_address":"10.194.188.4/26","gateway_ip":"10.194.188.1"}}`, + }, + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + NodeName: "node2", + }, + } + + By("adds IP to destination address set for matching pod") + { + _, err := fakeKubeClient.CoreV1().Pods(app1Pod.Namespace).Create(context.TODO(), app1Pod, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyAddressSetHas(defaultAddrsetFactory, nqosNamespace, nqosName, "0", "0", defaultControllerName, "10.194.188.4") + + By("updates match strings if egress rules change") + nqosUpdate, err := fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Get(context.TODO(), nqosName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + nqosUpdate.ResourceVersion = time.Now().String() + nqosUpdate.Spec.Egress[1].Classifier.To[1].IPBlock.Except = nil + _, err = fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Update(context.TODO(), nqosUpdate, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + sourceAddrSet, err := findAddressSet(defaultAddrsetFactory, nqosNamespace, nqosName, "src", "0", defaultControllerName) + Expect(err).NotTo(HaveOccurred()) + dst1AddrSet, err1 := findAddressSet(defaultAddrsetFactory, nqosNamespace, nqosName, "0", "0", defaultControllerName) + Expect(err1).NotTo(HaveOccurred()) + srcHashName4, _ := sourceAddrSet.GetASHashNames() + dst1HashName4, _ := dst1AddrSet.GetASHashNames() + + Eventually(func() string { + qos, err := findQoS(defaultControllerName, nqosNamespace, nqosName, 0) + if err != nil { + return err.Error() + } + return qos.Match + }).WithTimeout(5 * time.Second).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || (ip4.dst == 128.116.0.0/17 && ip4.dst != {128.116.0.0,128.116.0.255})) && tcp && tcp.dst == 8080", srcHashName4, dst1HashName4))) + + dst3AddrSet, err3 := findAddressSet(defaultAddrsetFactory, nqosNamespace, nqosName, "1", "0", defaultControllerName) + Expect(err3).NotTo(HaveOccurred()) + dst3HashName4, _ := dst3AddrSet.GetASHashNames() + Eventually(func() string { + qos, err := findQoS(defaultControllerName, nqosNamespace, nqosName, 1) + if err != nil { + return err.Error() + } + return qos.Match + }).WithTimeout(5 * time.Second).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || ip4.dst == 128.118.0.0/17) && udp && udp.dst == 9090", srcHashName4, dst3HashName4))) + } + + By("removes IP from destination address set if pod's labels don't match the selector") + { + updatePod := app1Pod.DeepCopy() + updatePod.Labels["component"] = "dummy" + updatePod.ResourceVersion = time.Now().String() + _, err := fakeKubeClient.CoreV1().Pods(app1Pod.Namespace).Update(context.TODO(), updatePod, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyAddressSetHasNo(defaultAddrsetFactory, nqosNamespace, nqosName, "0", "0", defaultControllerName, "10.194.188.4") + } + + By("adds IP to destination address set again if pod's labels match the selector") + { + updatePod := app1Pod.DeepCopy() + updatePod.Labels["component"] = "service1" + _, err := fakeKubeClient.CoreV1().Pods(app1Pod.Namespace).Update(context.TODO(), updatePod, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyAddressSetHas(defaultAddrsetFactory, nqosNamespace, 
nqosName, "0", "0", defaultControllerName, "10.194.188.4") + } + + By("removes IP from destination address set if target namespace labels don't match the selector") + { + ns, err := fakeKubeClient.CoreV1().Namespaces().Get(context.TODO(), app1Namespace, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + ns.ResourceVersion = time.Now().String() + ns.Labels["app"] = "dummy" + _, err = fakeKubeClient.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyAddressSetHasNo(defaultAddrsetFactory, nqosNamespace, nqosName, "0", "0", defaultControllerName, "10.194.188.4") + } + + By("adds IP to destination address set again if namespace's labels match the selector") + { + ns, err := fakeKubeClient.CoreV1().Namespaces().Get(context.TODO(), app1Namespace, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + ns.ResourceVersion = time.Now().String() + ns.Labels["app"] = "app1" + _, err = fakeKubeClient.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyAddressSetHas(defaultAddrsetFactory, nqosNamespace, nqosName, "0", "0", defaultControllerName, "10.194.188.4") + } + + By("removes IP from destination address set if namespace selector changes") + { + nqosUpdate, err := fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Get(context.TODO(), nqosName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + nqosUpdate.Spec.Egress[0].Classifier.To[0].NamespaceSelector = &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "dummy", + }, + } + nqosUpdate.ResourceVersion = time.Now().String() + _, err = fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Update(context.TODO(), nqosUpdate, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyAddressSetHasNo(defaultAddrsetFactory, nqosNamespace, nqosName, "0", "0", defaultControllerName, "10.194.188.4") + } + + By("adds IP to destination address set if namespace selector is restored") + { + nqosUpdate, err := fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Get(context.TODO(), nqosName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + nqosUpdate.Spec.Egress[0].Classifier.To[0].NamespaceSelector = &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "app1", + }, + } + nqosUpdate.ResourceVersion = time.Now().String() + _, err = fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Update(context.TODO(), nqosUpdate, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyAddressSetHas(defaultAddrsetFactory, nqosNamespace, nqosName, "0", "0", defaultControllerName, "10.194.188.4") + } + + app3Pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: app3Namespace, + Name: "app3-pod", + Labels: map[string]string{ + "component": "service3", + }, + Annotations: map[string]string{ + "k8s.ovn.org/pod-networks": `{"default":{"ip_addresses":["10.195.188.4/26"],"mac_address":"0a:58:0a:c3:bc:04","gateway_ips":["10.195.188.1"],"routes":[{"dest":"10.195.0.0/16","nextHop":"10.195.188.1"},{"dest":"10.223.0.0/16","nextHop":"10.195.188.1"},{"dest":"100.64.0.0/16","nextHop":"10.195.188.1"}],"mtu":"1500","ip_address":"10.195.188.4/26","gateway_ip":"10.195.188.1"}}`, + }, + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + NodeName: "node2", + }, + } + + By("adds IP to destination address set of the second rule for matching pod") + { + _, err := fakeKubeClient.CoreV1().Pods(app3Pod.Namespace).Create(context.TODO(), app3Pod, 
metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyAddressSetHas(defaultAddrsetFactory, nqosNamespace, nqosName, "1", "0", defaultControllerName, "10.195.188.4") + } + + By("adds new QoS rule to ovn nb when a new Egress rule is added") + { + nqosUpdate, err := fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Get(context.TODO(), nqosName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + nqosUpdate.Spec.Egress = append(nqosUpdate.Spec.Egress, nqostype.Rule{ + DSCP: 102, + Classifier: nqostype.Classifier{ + To: []nqostype.Destination{ + { + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "app1", + }, + }, + }, + }, + }, + }) + nqosUpdate.ResourceVersion = time.Now().String() + _, err = fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Update(context.TODO(), nqosUpdate, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyExpectQoS(defaultControllerName, nqosNamespace, nqosName, 2) + eventuallyAddressSetHas(defaultAddrsetFactory, nqosNamespace, nqosName, "src", "0", defaultControllerName, "10.192.177.4") + eventuallyAddressSetHas(defaultAddrsetFactory, nqosNamespace, nqosName, "0", "0", defaultControllerName, "10.194.188.4") + eventuallyAddressSetHas(defaultAddrsetFactory, nqosNamespace, nqosName, "1", "0", defaultControllerName, "10.195.188.4") + eventuallyAddressSetHas(defaultAddrsetFactory, nqosNamespace, nqosName, "2", "0", defaultControllerName, "10.194.188.4") + } + + nqos4StreamNet := &nqostype.NetworkQoS{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: nqosNamespace, + Name: "stream-qos", + }, + Spec: nqostype.Spec{ + NetworkAttachmentRefs: []corev1.ObjectReference{ + { + Kind: "NetworkAttachmentDefinition", + Namespace: "default", + Name: "unknown", + }, + }, + Priority: 100, + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "client", + }, + }, + Egress: []nqostype.Rule{ + { + DSCP: 50, + Bandwidth: nqostype.Bandwidth{ + Rate: 10000, + Burst: 100000, + }, + Classifier: nqostype.Classifier{ + To: []nqostype.Destination{ + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "128.115.0.0/17", + Except: []string{ + "128.115.0.0", + "128.115.0.255", + }, + }, + }, + }, + }, + }, + }, + }, + } + + By("will not handle NetworkQos with unknown NetworkAttachmentDefinition in spec") + { + _, err := fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Create(context.TODO(), nqos4StreamNet, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyExpectNoQoS(defaultControllerName, nqosNamespace, "stream-qos", 0) + } + + By("handles NetworkQos on secondary network") + { + nqos4StreamNet.Spec.NetworkAttachmentRefs = []corev1.ObjectReference{ + { + Kind: "NetworkAttachmentDefinition", + Namespace: "default", + Name: "stream", + }, + } + nqos4StreamNet.ResourceVersion = time.Now().String() + _, err := fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Update(context.TODO(), nqos4StreamNet, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + qos := eventuallyExpectQoS(streamControllerName, nqosNamespace, "stream-qos", 0) + eventuallySwitchHasQoS("stream_node1", qos) + eventuallyAddressSetHas(streamAddrsetFactory, nqosNamespace, "stream-qos", "src", "0", streamControllerName, "10.128.2.3") + } + + By("uses namespace's address set as source if pod selector is not provided in source") + { + dbIDs := libovsdbops.NewDbObjectIDs(libovsdbops.AddressSetNamespace, defaultControllerName, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: 
nqosNamespace, + }) + addrset, err := defaultAddrsetFactory.EnsureAddressSet(dbIDs) + addrset.AddAddresses([]string{"10.194.188.4"}) + Expect(err).NotTo(HaveOccurred()) + nqosWithoutSrcSelector := &nqostype.NetworkQoS{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: nqosNamespace, + Name: "no-source-selector", + }, + Spec: nqostype.Spec{ + Priority: 100, + Egress: []nqostype.Rule{ + { + DSCP: 50, + Bandwidth: nqostype.Bandwidth{ + Rate: 10000, + Burst: 100000, + }, + Classifier: nqostype.Classifier{ + To: []nqostype.Destination{ + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "128.115.0.0/17", + Except: []string{ + "128.115.0.0", + "128.115.0.255", + }, + }, + }, + }, + }, + }, + }, + }, + } + _, err = fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Create(context.TODO(), nqosWithoutSrcSelector, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + qos := eventuallyExpectQoS(defaultControllerName, nqosNamespace, "no-source-selector", 0) + v4HashName, _ := addrset.GetASHashNames() + Expect(qos.Match).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && ip4.dst == 128.115.0.0/17 && ip4.dst != {128.115.0.0,128.115.0.255}", v4HashName))) + } + + By("clear QoS attributes of existing NetworkQoS and make sure that is proper") + { + nqosWithoutSrcSelector := &nqostype.NetworkQoS{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: nqosNamespace, + Name: "no-source-selector", + }, + Spec: nqostype.Spec{ + Priority: 1, + Egress: []nqostype.Rule{ + { + DSCP: 50, + // Bandwidth: nqostype.Bandwidth{}, + Classifier: nqostype.Classifier{ + To: []nqostype.Destination{ + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "128.115.0.0/17", + Except: []string{ + "128.115.0.0", + "123.123.123.123", + }, + }, + }, + }, + }, + }, + }, + }, + } + nqosWithoutSrcSelector.ResourceVersion = time.Now().String() + _, err := fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Update(context.TODO(), nqosWithoutSrcSelector, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + dbIDs := libovsdbops.NewDbObjectIDs(libovsdbops.AddressSetNamespace, defaultControllerName, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: nqosNamespace, + }) + addrset, err := defaultAddrsetFactory.EnsureAddressSet(dbIDs) + Expect(err).NotTo(HaveOccurred()) + v4HashName, _ := addrset.GetASHashNames() + + // Ensure that QoS priority and Bandwidth have been properly changed by OVN + var qos *nbdb.QoS + Eventually(func() bool { + qos, err = findQoS(defaultControllerName, nqosNamespace, "no-source-selector", 0) + Expect(err).NotTo(HaveOccurred()) + Expect(qos).NotTo(BeNil()) + return qos.Priority == 10010 && len(qos.Bandwidth) == 0 + }).WithTimeout(5 * time.Second).WithPolling(1 * time.Second).Should(BeTrue()) + Expect(qos.Match).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && ip4.dst == 128.115.0.0/17 && ip4.dst != {128.115.0.0,123.123.123.123}", v4HashName))) + } + + By("removes IP from destination address set if pod is deleted") + { + err := fakeKubeClient.CoreV1().Pods(app1Pod.Namespace).Delete(context.TODO(), app1Pod.Name, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyAddressSetHasNo(defaultAddrsetFactory, nqosNamespace, nqosName, "0", "0", defaultControllerName, "10.194.188.4") + } + + By("removes IP from destination address set of the second rule if namespace is deleted") + { + err := fakeKubeClient.CoreV1().Namespaces().Delete(context.TODO(), app3Pod.Namespace, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyAddressSetHasNo(defaultAddrsetFactory, nqosNamespace, 
nqosName, "1", "0", defaultControllerName, "10.195.188.4") + err = fakeKubeClient.CoreV1().Pods(app3Pod.Namespace).Delete(context.TODO(), app3Pod.Name, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + } + + By("deletes stale QoS from ovn nb when Egress rule is deleted") + { + qos2, err1 := findQoS(defaultControllerName, nqosNamespace, nqosName, 2) + Expect(err1).NotTo(HaveOccurred()) + nqosUpdate, err := fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Get(context.TODO(), nqosName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + nqosUpdate.ResourceVersion = time.Now().String() + nqosUpdate.Spec.Egress = slices.Delete(nqosUpdate.Spec.Egress, 1, 2) + _, err = fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Update(context.TODO(), nqosUpdate, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallySwitchHasNoQoS("node1", qos2) + eventuallyExpectNoQoS(defaultControllerName, nqosNamespace, nqosName, 2) + } + + By("unbinds QoS rule from logical switch when no source pods is selected") + { + qos0, err0 := findQoS(defaultControllerName, nqosNamespace, nqosName, 0) + Expect(err0).NotTo(HaveOccurred()) + qos1, err1 := findQoS(defaultControllerName, nqosNamespace, nqosName, 1) + Expect(err1).NotTo(HaveOccurred()) + // qos should be present, as pod is not yet deleted + eventuallySwitchHasQoS("node1", qos0) + eventuallySwitchHasQoS("node1", qos1) + err := fakeKubeClient.CoreV1().Pods(nqosNamespace).Delete(context.TODO(), clientPodName, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + // qos should be unbound from switch + eventuallySwitchHasNoQoS("node1", qos0) + eventuallySwitchHasNoQoS("node1", qos1) + } + + By("deletes QoS after NetworkQoS object is deleted") + { + err := fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Delete(context.TODO(), nqosName, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyExpectNoQoS(defaultControllerName, nqosNamespace, nqosName, 0) + eventuallyExpectNoQoS(defaultControllerName, nqosNamespace, nqosName, 1) + } + + By("generates correct logical switch name for localnet topology") + { + localnetNad := ovnk8stesting.GenerateNAD("netwk1", "netwk1", "default", types.LocalnetTopology, "10.129.0.0/16", types.NetworkRoleSecondary) + localnetImmutableNadInfo, err := util.ParseNADInfo(localnetNad) + Expect(err).NotTo(HaveOccurred()) + localnetNadInfo := util.NewMutableNetInfo(localnetImmutableNadInfo) + localnetNadInfo.AddNADs("default/netwk1") + ctrl := initNetworkQoSController(localnetNadInfo, addressset.NewFakeAddressSetFactory("netwk1-controller"), "netwk1-controller") + lsName := ctrl.getLogicalSwitchName("dummy") + Expect(lsName).To(Equal("netwk1_ovn_localnet_switch")) + } + + By("generates correct logical switch name for layer2 topology") + { + layer2Nad := ovnk8stesting.GenerateNAD("netwk2", "netwk2", "default", types.Layer2Topology, "10.130.0.0/16", types.NetworkRoleSecondary) + layer2ImmutableNadInfo, err := util.ParseNADInfo(layer2Nad) + Expect(err).NotTo(HaveOccurred()) + layer2NadInfo := util.NewMutableNetInfo(layer2ImmutableNadInfo) + layer2NadInfo.AddNADs("default/netwk2") + ctrl := initNetworkQoSController(layer2NadInfo, addressset.NewFakeAddressSetFactory("netwk2-controller"), "netwk2-controller") + lsName := ctrl.getLogicalSwitchName("dummy") + Expect(lsName).To(Equal("netwk2_ovn_layer2_switch")) + } + }, + Entry("Interconnect Disabled", false), + Entry("Interconnect Enabled", true), + ) + }) +}) + +func eventuallyExpectAddressSet(addrsetFactory 
addressset.AddressSetFactory, nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex, controllerName string) { + Eventually(func() bool { + addrset, _ := findAddressSet(addrsetFactory, nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex, controllerName) + return addrset != nil + }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("address set not found for %s/%s, rule %s, address block %s", nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex)) +} + +func eventuallyAddressSetHas(addrsetFactory addressset.AddressSetFactory, nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex, controllerName, ip string) { + Eventually(func() bool { + addrset, _ := findAddressSet(addrsetFactory, nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex, controllerName) + if addrset == nil { + return false + } + ip4, _ := addrset.GetAddresses() + return slices.Contains(ip4, ip) + }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("address set does not contain expected ip %s", ip)) +} + +func eventuallyAddressSetHasNo(addrsetFactory addressset.AddressSetFactory, nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex, controllerName, ip string) { + Eventually(func() bool { + addrset, _ := findAddressSet(addrsetFactory, nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex, controllerName) + if addrset == nil { + return true + } + ip4, _ := addrset.GetAddresses() + return !slices.Contains(ip4, ip) + }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("address set still has unexpected ip %s", ip)) +} + +func findAddressSet(addrsetFactory addressset.AddressSetFactory, nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex, controllerName string) (addressset.AddressSet, error) { + dbID := GetNetworkQoSAddrSetDbIDs(nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex, controllerName) + return addrsetFactory.GetAddressSet(dbID) +} + +func eventuallyExpectQoS(controllerName, qosNamespace, qosName string, index int) *nbdb.QoS { + var qos *nbdb.QoS + Eventually(func() bool { + qos, _ = findQoS(controllerName, qosNamespace, qosName, index) + return qos != nil + }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("QoS not found for %s/%s", qosNamespace, qosName)) + return qos +} + +func eventuallyExpectNoQoS(controllerName, qosNamespace, qosName string, index int) { + var qos *nbdb.QoS + Eventually(func() bool { + qos, _ = findQoS(controllerName, qosNamespace, qosName, index) + return qos == nil + }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("Unexpected QoS found for %s/%s, index %d", qosNamespace, qosName, index)) +} + +func findQoS(controllerName, qosNamespace, qosName string, index int) (*nbdb.QoS, error) { + qosKey := joinMetaNamespaceAndName(qosNamespace, qosName, ":") + dbIDs := libovsdbops.NewDbObjectIDs(libovsdbops.NetworkQoS, controllerName, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: qosKey, + libovsdbops.RuleIndex: fmt.Sprintf("%d", index), + }) + predicate := libovsdbops.GetPredicate(dbIDs, func(item *nbdb.QoS) bool { + return item.ExternalIDs[libovsdbops.OwnerControllerKey.String()] == controllerName && + item.ExternalIDs[libovsdbops.ObjectNameKey.String()] == qosKey && + item.ExternalIDs[libovsdbops.RuleIndex.String()] == strconv.Itoa(index) + }) + qoses, err := libovsdbops.FindQoSesWithPredicate(nbClient, predicate) + if err != nil { + return nil, err + } + if len(qoses) == 1 { + return qoses[0], nil + } + return nil, nil +} + 
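+// Worked example of the priority arithmetic the expectations above rely on:
+// getQoSRulePriority in types.go computes 10000 + qosPriority*10 + ruleIndex,
+// so the NetworkQoS with spec priority 100 yields 10000 + 100*10 + 0 = 11000
+// for egress rule 0 (hence qos0.Priority == 11000), and the
+// "no-source-selector" object with priority 1 yields 10000 + 1*10 + 0 = 10010.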
+func eventuallySwitchHasQoS(switchName string, qos *nbdb.QoS) { + var ls *nbdb.LogicalSwitch + Eventually(func() bool { + criteria := &nbdb.LogicalSwitch{ + Name: switchName, + } + ls, _ = libovsdbops.GetLogicalSwitch(nbClient, criteria) + return ls != nil && slices.Contains(ls.QOSRules, qos.UUID) + }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("QoS rule %s not found in switch %s", qos.UUID, switchName)) +} + +func eventuallySwitchHasNoQoS(switchName string, qos *nbdb.QoS) { + var ls *nbdb.LogicalSwitch + Eventually(func() bool { + criteria := &nbdb.LogicalSwitch{ + Name: switchName, + } + ls, _ = libovsdbops.GetLogicalSwitch(nbClient, criteria) + return ls != nil && !slices.Contains(ls.QOSRules, qos.UUID) + }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("Unexpected QoS rule %s found in switch %s", qos.UUID, switchName)) +} + +func initEnv(k8sObjects []runtime.Object, nqosObjects []runtime.Object, initialDB *libovsdbtest.TestSetup) { + var nbZoneFailed bool + var err error + stopChan = make(chan struct{}) + fakeKubeClient = fake.NewSimpleClientset(k8sObjects...) + fakeNQoSClient = fakenqosclient.NewSimpleClientset(nqosObjects...) + watchFactory, err = factory.NewMasterWatchFactory( + &util.OVNMasterClientset{ + KubeClient: fakeKubeClient, + NetworkQoSClient: fakeNQoSClient, + }, + ) + Expect(err).NotTo(HaveOccurred()) + + if initialDB == nil { + initialDB = &libovsdbtest.TestSetup{} + } + nbClient, nbsbCleanup, err = libovsdbtest.NewNBTestHarness(*initialDB, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = libovsdbutil.GetNBZone(nbClient) + if err != nil { + nbZoneFailed = true + err = createTestNBGlobal(nbClient, "global") + Expect(err).NotTo(HaveOccurred()) + } + + if nbZoneFailed { + err = deleteTestNBGlobal(nbClient) + Expect(err).NotTo(HaveOccurred()) + } + defaultAddrsetFactory = addressset.NewFakeAddressSetFactory(defaultControllerName) + streamAddrsetFactory = addressset.NewFakeAddressSetFactory("stream-network-controller") +} + +func initNetworkQoSController(netInfo util.NetInfo, addrsetFactory addressset.AddressSetFactory, controllerName string) *Controller { + nqosController, err := NewController( + controllerName, + netInfo, + nbClient, + util.EventRecorder(fakeKubeClient), + fakeNQoSClient, + watchFactory.NetworkQoSInformer(), + watchFactory.NamespaceCoreInformer(), + watchFactory.PodCoreInformer(), + watchFactory.NodeCoreInformer(), + addrsetFactory, + func(pod *corev1.Pod) bool { + return pod.Spec.NodeName == "node1" + }, "node1") + Expect(err).NotTo(HaveOccurred()) + err = watchFactory.Start() + Expect(err).NotTo(HaveOccurred()) + wg.Add(1) + go func() { + defer wg.Done() + nqosController.Run(1, stopChan) + }() + return nqosController +} + +func shutdownController() { + if watchFactory != nil { + watchFactory.Shutdown() + watchFactory = nil + } + if stopChan != nil { + close(stopChan) + stopChan = nil + } +} + +func createTestNBGlobal(nbClient libovsdbclient.Client, zone string) error { + nbGlobal := &nbdb.NBGlobal{Name: zone} + ops, err := nbClient.Create(nbGlobal) + if err != nil { + return err + } + + _, err = nbClient.Transact(context.Background(), ops...) + if err != nil { + return err + } + + return nil +} + +func deleteTestNBGlobal(nbClient libovsdbclient.Client) error { + p := func(nbGlobal *nbdb.NBGlobal) bool { + return true + } + ops, err := nbClient.WhereCache(p).Delete() + if err != nil { + return err + } + _, err = nbClient.Transact(context.Background(), ops...) 
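+	// the WhereCache predicate built above matches every cached NB_Global
+	// row, so this transaction removes the test NB_Global created earlier by
+	// createTestNBGlobal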
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/go-controller/pkg/ovn/controller/network_qos/repair.go b/go-controller/pkg/ovn/controller/network_qos/repair.go
new file mode 100644
index 0000000000..edfbf17dd8
--- /dev/null
+++ b/go-controller/pkg/ovn/controller/network_qos/repair.go
@@ -0,0 +1,77 @@
+package networkqos
+
+import (
+	"fmt"
+	"time"
+
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/klog/v2"
+
+	networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1"
+	libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+// repairNetworkQoSes is called at startup and, as the name suggests,
+// aims to repair the NBDB logical objects
+// that are created for the network qoses in the cluster
+func (c *Controller) repairNetworkQoSes() error {
+	start := time.Now()
+	defer func() {
+		klog.Infof("Repairing network qos took %v", time.Since(start))
+	}()
+	nqoses, err := c.nqosLister.List(labels.Everything())
+	if err != nil {
+		return fmt.Errorf("unable to list NetworkQoSes from the lister: %v", err)
+	}
+	nqosMap := map[string]*networkqosapi.NetworkQoS{}
+	for _, nqos := range nqoses {
+		nqosMap[joinMetaNamespaceAndName(nqos.Namespace, nqos.Name, ":")] = nqos
+	}
+
+	// delete stale ovn qos objects owned by NetworkQoS
+	if err := libovsdbops.DeleteQoSesWithPredicate(c.nbClient, func(qos *nbdb.QoS) bool {
+		if qos.ExternalIDs[libovsdbops.OwnerControllerKey.String()] != c.controllerName ||
+			qos.ExternalIDs[libovsdbops.OwnerTypeKey.String()] != string(libovsdbops.NetworkQoSOwnerType) {
+			return false
+		}
+		objName := qos.ExternalIDs[libovsdbops.ObjectNameKey.String()]
+		// doesn't have corresponding k8s name
+		if objName == "" {
+			klog.Warningf("OVN QoS %s doesn't have expected key %s", qos.UUID, libovsdbops.ObjectNameKey.String())
+			return true
+		}
+		// clean up qoses whose k8s object has gone
+		if _, exists := nqosMap[objName]; !exists {
+			klog.Warningf("OVN QoS %s doesn't have expected NetworkQoS object %s", qos.UUID, objName)
+			return true
+		}
+		return false
+	}); err != nil {
+		klog.Errorf("Failed to clean up stale QoSes: %v", err)
+	}
+
+	// delete address sets whose networkqos object has gone in k8s
+	if err := libovsdbops.DeleteAddressSetsWithPredicate(c.nbClient, func(addrset *nbdb.AddressSet) bool {
+		if addrset.ExternalIDs[libovsdbops.OwnerControllerKey.String()] != c.controllerName ||
+			addrset.ExternalIDs[libovsdbops.OwnerTypeKey.String()] != string(libovsdbops.NetworkQoSOwnerType) {
+			return false
+		}
+		objName := addrset.ExternalIDs[libovsdbops.ObjectNameKey.String()]
+		// doesn't have corresponding k8s name
+		if objName == "" {
+			klog.Warningf("AddressSet %s doesn't have expected key %s", addrset.UUID, libovsdbops.ObjectNameKey.String())
+			return true
+		}
+		// clean up address sets whose k8s object has gone
+		if _, exists := nqosMap[objName]; !exists {
+			klog.Warningf("AddressSet %s doesn't have expected NetworkQoS object %s", addrset.UUID, objName)
+			return true
+		}
+		return false
+	}); err != nil {
+		klog.Errorf("Failed to clean up stale address sets: %v", err)
+	}
+
+	return nil
+}
diff --git a/go-controller/pkg/ovn/controller/network_qos/types.go b/go-controller/pkg/ovn/controller/network_qos/types.go
new file mode 100644
index 0000000000..8ea1ddd8d1
--- /dev/null
+++ b/go-controller/pkg/ovn/controller/network_qos/types.go
@@ -0,0 +1,392 @@
+package networkqos
+
+import (
+	"fmt"
+	"slices"
+
"strconv" + "strings" + "sync" + "time" + + v1 "k8s.io/api/core/v1" + knet "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/klog/v2" + utilnet "k8s.io/utils/net" + + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +// networkQoSState is the cache that keeps the state of a single +// network qos in the cluster with namespace+name being unique +type networkQoSState struct { + sync.RWMutex + // name of the network qos + name string + namespace string + + SrcAddrSet addressset.AddressSet + Pods sync.Map // pods name -> ips in the srcAddrSet + SwitchRefs sync.Map // switch name -> list of source pods + PodSelector labels.Selector + + // egressRules stores the objects needed to track .Spec.Egress changes + EgressRules []*GressRule +} + +func (nqosState *networkQoSState) getObjectNameKey() string { + return joinMetaNamespaceAndName(nqosState.namespace, nqosState.name, ":") +} + +func (nqosState *networkQoSState) getDbObjectIDs(controller string, ruleIndex int) *libovsdbops.DbObjectIDs { + return libovsdbops.NewDbObjectIDs(libovsdbops.NetworkQoS, controller, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: nqosState.getObjectNameKey(), + libovsdbops.RuleIndex: fmt.Sprintf("%d", ruleIndex), + }) +} + +func (nqosState *networkQoSState) emptyPodSelector() bool { + return nqosState.PodSelector == nil || nqosState.PodSelector.Empty() +} + +func (nqosState *networkQoSState) initAddressSets(addressSetFactory addressset.AddressSetFactory, controllerName string) error { + var err error + // init source address set + if nqosState.emptyPodSelector() { + nqosState.SrcAddrSet, err = getNamespaceAddressSet(addressSetFactory, controllerName, nqosState.namespace) + } else { + nqosState.SrcAddrSet, err = addressSetFactory.EnsureAddressSet(GetNetworkQoSAddrSetDbIDs(nqosState.namespace, nqosState.name, "src", "0", controllerName)) + } + if err != nil { + return fmt.Errorf("failed to init source address set for %s/%s: %w", nqosState.namespace, nqosState.name, err) + } + + // ensure destination address sets + for ruleIndex, rule := range nqosState.EgressRules { + for destIndex, dest := range rule.Classifier.Destinations { + if dest.NamespaceSelector == nil && dest.PodSelector == nil { + continue + } + dest.DestAddrSet, err = addressSetFactory.EnsureAddressSet(GetNetworkQoSAddrSetDbIDs(nqosState.namespace, nqosState.name, strconv.Itoa(ruleIndex), strconv.Itoa(destIndex), controllerName)) + if err != nil { + return fmt.Errorf("failed to init destination address set for %s/%s: %w", nqosState.namespace, nqosState.name, err) + } + } + } + return nil +} + +func (nqosState *networkQoSState) matchSourceSelector(pod *v1.Pod) bool { + if pod.Namespace != nqosState.namespace { + return false + } + if nqosState.PodSelector == nil { + return true + } + return nqosState.PodSelector.Matches(labels.Set(pod.Labels)) +} + +func (nqosState *networkQoSState) configureSourcePod(ctrl *Controller, pod *v1.Pod, addresses []string) error { + fullPodName := joinMetaNamespaceAndName(pod.Namespace, pod.Name) + if nqosState.PodSelector != nil { + // if PodSelector is nil, use namespace's address set, so unnecessary to add ip here + if err := nqosState.SrcAddrSet.AddAddresses(addresses); err != nil { + return fmt.Errorf("failed to add addresses {%s} to 
address set %s for NetworkQoS %s/%s: %v", strings.Join(addresses, ","), nqosState.SrcAddrSet.GetName(), nqosState.namespace, nqosState.name, err)
+		}
+		nqosState.Pods.Store(fullPodName, addresses)
+		klog.V(4).Infof("Successfully added address (%s) of pod %s to address set %s", strings.Join(addresses, ","), fullPodName, nqosState.SrcAddrSet.GetName())
+	}
+	// get switch name
+	switchName := ctrl.getLogicalSwitchName(pod.Spec.NodeName)
+	if switchName == "" {
+		return fmt.Errorf("failed to get logical switch name for node %s, topology %s", pod.Spec.NodeName, ctrl.TopologyType())
+	}
+
+	podList := []string{}
+	val, loaded := nqosState.SwitchRefs.Load(switchName)
+	if loaded {
+		podList = val.([]string)
+	}
+
+	if !loaded {
+		klog.V(4).Infof("Adding NetworkQoS %s/%s to logical switch %s", nqosState.namespace, nqosState.name, switchName)
+		start := time.Now()
+		if err := ctrl.addQoSToLogicalSwitch(nqosState, switchName); err != nil {
+			return err
+		}
+		recordOvnOperationDuration("add", time.Since(start).Milliseconds())
+	}
+
+	podList = append(podList, fullPodName)
+	nqosState.SwitchRefs.Store(switchName, podList)
+	return nil
+}
+
+func (nqosState *networkQoSState) removePodFromSource(ctrl *Controller, fullPodName string, addresses []string) error {
+	if len(addresses) == 0 {
+		// if no addresses are provided, try a lookup in the cache
+		if val, ok := nqosState.Pods.Load(fullPodName); ok {
+			addresses = val.([]string)
+		}
+	}
+	if len(addresses) > 0 && nqosState.PodSelector != nil {
+		// remove pod from non-namespace-scope source address set
+		if err := nqosState.SrcAddrSet.DeleteAddresses(addresses); err != nil {
+			return fmt.Errorf("failed to delete addresses (%s) from address set %s: %v", strings.Join(addresses, ","), nqosState.SrcAddrSet.GetName(), err)
+		}
+	}
+	nqosState.Pods.Delete(fullPodName)
+	return nqosState.removeZeroQoSNodes(ctrl, fullPodName)
+}
+
+func (nqosState *networkQoSState) removeZeroQoSNodes(ctrl *Controller, fullPodName string) error {
+	zeroQoSSwitches := []string{}
+	// since the node is unknown when a pod is deleted, iterate the SwitchRefs to remove the pod
+	nqosState.SwitchRefs.Range(func(key, val any) bool {
+		switchName := key.(string)
+		podList := val.([]string)
+		podList = slices.DeleteFunc(podList, func(s string) bool {
+			return s == fullPodName
+		})
+		if len(podList) == 0 {
+			zeroQoSSwitches = append(zeroQoSSwitches, switchName)
+		} else {
+			nqosState.SwitchRefs.Store(switchName, podList)
+		}
+		return true
+	})
+	// unbind qos from L3 logical switches that no longer have any source pods
+	if len(zeroQoSSwitches) > 0 && ctrl.TopologyType() == types.Layer3Topology {
+		start := time.Now()
+		if err := ctrl.removeQoSFromLogicalSwitches(nqosState, zeroQoSSwitches); err != nil {
+			return err
+		}
+		recordOvnOperationDuration("remove", time.Since(start).Milliseconds())
+		for _, lsw := range zeroQoSSwitches {
+			nqosState.SwitchRefs.Delete(lsw)
+		}
+	}
+	return nil
+}
+
+func (nqosState *networkQoSState) getAddressSetHashNames() []string {
+	addrsetNames := []string{}
+	if nqosState.SrcAddrSet != nil {
+		v4Hash, v6Hash := nqosState.SrcAddrSet.GetASHashNames()
+		addrsetNames = append(addrsetNames, v4Hash, v6Hash)
+	}
+	for _, rule := range nqosState.EgressRules {
+		for _, dest := range rule.Classifier.Destinations {
+			if dest.DestAddrSet != nil {
+				v4Hash, v6Hash := dest.DestAddrSet.GetASHashNames()
+				addrsetNames = append(addrsetNames, v4Hash, v6Hash)
+			}
+		}
+	}
+	return addrsetNames
+}
+
+type GressRule struct {
+	Priority   int
+	Dscp       int
+	Classifier *Classifier
+
+	// bandwidth
+	Rate  *int
+	Burst *int
+}
+
+type protocol string
+
+func (p protocol) IsValid() bool {
+	switch p.String() {
+	case "tcp", "udp", "sctp":
+		return true
+	default:
+		return false
+	}
+}
+
+func (p protocol) String() string {
+	return strings.ToLower(string(p))
+}
+
+type trafficDirection string
+
+const (
+	trafficDirSource trafficDirection = "src"
+	trafficDirDest   trafficDirection = "dst"
+)
+
+type Classifier struct {
+	Destinations []*Destination
+
+	// port
+	Protocol protocol
+	Port     *int
+}
+
+// ToQosMatchString generates the dest and protocol/port parts of the QoS match string, based on
+// the Classifier's destinations, protocol and port fields, example:
+// (ip4.dst == $addr_set_name || (ip4.dst == 128.116.0.0/17 && ip4.dst != {128.116.0.0,128.116.0.255})) && tcp && tcp.dst == 8080
+// Multiple destinations will be connected by "||".
+// See https://github.com/ovn-org/ovn/blob/2bdf1129c19d5bd2cd58a3ddcb6e2e7254b05054/ovn-nb.xml#L2942-L3025 for details
+func (c *Classifier) ToQosMatchString(ipv4Enabled, ipv6Enabled bool) string {
+	if c == nil {
+		return ""
+	}
+	destMatchStrings := []string{}
+	for _, dest := range c.Destinations {
+		match := "ip4.dst == 0.0.0.0/0 || ip6.dst == ::/0"
+		if dest.DestAddrSet != nil {
+			match = addressSetToMatchString(dest.DestAddrSet, trafficDirDest, ipv4Enabled, ipv6Enabled)
+		} else if dest.IpBlock != nil && dest.IpBlock.CIDR != "" {
+			ipVersion := "ip4"
+			if utilnet.IsIPv6CIDRString(dest.IpBlock.CIDR) {
+				ipVersion = "ip6"
+			}
+			if len(dest.IpBlock.Except) == 0 {
+				match = fmt.Sprintf("%s.%s == %s", ipVersion, trafficDirDest, dest.IpBlock.CIDR)
+			} else {
+				match = fmt.Sprintf("%s.%s == %s && %s.%s != {%s}", ipVersion, trafficDirDest, dest.IpBlock.CIDR, ipVersion, trafficDirDest, strings.Join(dest.IpBlock.Except, ","))
+			}
+		}
+		destMatchStrings = append(destMatchStrings, match)
+	}
+
+	output := ""
+	if len(destMatchStrings) == 1 {
+		output = destMatchStrings[0]
+	} else {
+		for index, str := range destMatchStrings {
+			if index > 0 {
+				output += " || "
+			}
+			if strings.Contains(str, "||") || strings.Contains(str, "&&") {
+				output = output + fmt.Sprintf("(%s)", str)
+			} else {
+				output = output + str
+			}
+		}
+	}
+	if strings.Contains(output, "||") {
+		output = fmt.Sprintf("(%s)", output)
+	}
+	if c.Protocol != "" {
+		if c.Port != nil && *c.Port > 0 {
+			match := fmt.Sprintf("%s && %s.dst == %d", c.Protocol.String(), c.Protocol.String(), *c.Port)
+			if output != "" {
+				output = fmt.Sprintf("%s && %s", output, match)
+			} else {
+				output = match
+			}
+		} else {
+			if output != "" {
+				output = fmt.Sprintf("%s && %s", output, c.Protocol.String())
+			} else {
+				output = c.Protocol.String()
+			}
+		}
+	}
+	return output
+}
+
+type Destination struct {
+	IpBlock *knet.IPBlock
+
+	DestAddrSet       addressset.AddressSet
+	PodSelector       labels.Selector
+	Pods              sync.Map // pod name -> ips in the destAddrSet
+	NamespaceSelector labels.Selector
+}
+
+func (dest *Destination) matchNamespace(podNs *v1.Namespace, qosNamespace string) bool {
+	if dest.NamespaceSelector == nil {
+		return podNs.Name == qosNamespace
+	}
+	return dest.NamespaceSelector.Matches(labels.Set(podNs.Labels))
+}
+
+func (dest *Destination) matchPod(podNs *v1.Namespace, pod *v1.Pod, qosNamespace string) bool {
+	switch {
+	case dest.NamespaceSelector != nil && dest.PodSelector != nil:
+		return dest.NamespaceSelector.Matches(labels.Set(podNs.Labels)) && dest.PodSelector.Matches(labels.Set(pod.Labels))
+	case dest.NamespaceSelector == nil && dest.PodSelector != nil:
+		return pod.Namespace ==
qosNamespace && dest.PodSelector.Matches(labels.Set(pod.Labels)) + case dest.NamespaceSelector != nil && dest.PodSelector == nil: + return dest.NamespaceSelector.Matches(labels.Set(podNs.Labels)) + default: //dest.NamespaceSelector == nil && dest.PodSelector == nil: + return false + } +} + +func (dest *Destination) addPod(podNamespace, podName string, addresses []string) error { + if err := dest.DestAddrSet.AddAddresses(addresses); err != nil { + return err + } + // add pod to map + dest.Pods.Store(joinMetaNamespaceAndName(podNamespace, podName), addresses) + return nil +} + +func (dest *Destination) removePod(fullPodName string, addresses []string) error { + if len(addresses) == 0 { + val, ok := dest.Pods.Load(fullPodName) + if ok && val != nil { + addresses = val.([]string) + } + } + if err := dest.DestAddrSet.DeleteAddresses(addresses); err != nil { + return fmt.Errorf("failed to remove addresses (%s): %v", strings.Join(addresses, ","), err) + } + dest.Pods.Delete(fullPodName) + return nil +} + +func (dest *Destination) removePodsInNamespace(namespace string) error { + var err error + // check for pods in the namespace being cleared + dest.Pods.Range(func(key, value any) bool { + fullPodName := key.(string) + nameParts := strings.Split(fullPodName, "/") + if nameParts[0] != namespace { + // pod's namespace doesn't match + return true + } + err = dest.removePod(fullPodName, nil) + return err == nil + }) + return err +} + +func (dest *Destination) addPodsInNamespace(ctrl *Controller, namespace string) error { + podSelector := labels.Everything() + if dest.PodSelector != nil { + podSelector = dest.PodSelector + } + pods, err := ctrl.nqosPodLister.Pods(namespace).List(podSelector) + if err != nil { + if errors.IsNotFound(err) || len(pods) == 0 { + return nil + } + return fmt.Errorf("failed to look up pods in ns %s: %v", namespace, err) + } + klog.V(5).Infof("Found %d pods in namespace %s by selector %s", len(pods), namespace, podSelector.String()) + for _, pod := range pods { + podAddresses, err := getPodAddresses(pod, ctrl.NetInfo) + if err != nil { + return fmt.Errorf("failed to parse IPs for pod %s/%s: %v", pod.Namespace, pod.Name, err) + } + if err := dest.addPod(pod.Namespace, pod.Name, podAddresses); err != nil { + return fmt.Errorf("failed to add addresses {%s} to address set %s: %v", strings.Join(podAddresses, ","), dest.DestAddrSet.GetName(), err) + } + } + return nil +} + +func getQoSRulePriority(qosPriority, ruleIndex int) int { + return 10000 + qosPriority*10 + ruleIndex +} diff --git a/go-controller/pkg/ovn/controller/network_qos/utils.go b/go-controller/pkg/ovn/controller/network_qos/utils.go new file mode 100644 index 0000000000..7d58a7ad3e --- /dev/null +++ b/go-controller/pkg/ovn/controller/network_qos/utils.go @@ -0,0 +1,84 @@ +package networkqos + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" + ovnkutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +func joinMetaNamespaceAndName(namespace, name string, separator ...string) string { + if namespace == "" { + return name + } + sep := "/" + if len(separator) > 0 { + sep = separator[0] + } + return namespace + sep + name +} + +func GetNetworkQoSAddrSetDbIDs(nqosNamespace, nqosName, ruleIndex, ipBlockIndex, controller string) *libovsdbops.DbObjectIDs { + return 
libovsdbops.NewDbObjectIDs(libovsdbops.AddressSetNetworkQoS, controller, + map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: joinMetaNamespaceAndName(nqosNamespace, nqosName, ":"), + // rule index is the unique id for address set within given objectName + libovsdbops.RuleIndex: ruleIndex, + libovsdbops.IpBlockIndexKey: ipBlockIndex, + }) +} + +func getPodAddresses(pod *corev1.Pod, networkInfo ovnkutil.NetInfo) ([]string, error) { + // check annotation "k8s.ovn.org/pod-networks" before calling GetPodIPsOfNetwork, + // as it's no easy to check if the error is caused by missing annotation, while + // we don't want to return error for such case as it will trigger retry + _, ok := pod.Annotations[ovnkutil.OvnPodAnnotationName] + if !ok { + // pod hasn't been annotated yet, return nil to avoid retry + return nil, nil + } + ips, err := ovnkutil.GetPodIPsOfNetwork(pod, networkInfo) + if err != nil { + return nil, err + } + addresses := []string{} + for _, ip := range ips { + addresses = append(addresses, ip.String()) + } + return addresses, nil +} + +func generateNetworkQoSMatch(qosState *networkQoSState, rule *GressRule, ipv4Enabled, ipv6Enabled bool) string { + match := addressSetToMatchString(qosState.SrcAddrSet, trafficDirSource, ipv4Enabled, ipv6Enabled) + + classiferMatchString := rule.Classifier.ToQosMatchString(ipv4Enabled, ipv6Enabled) + if classiferMatchString != "" { + match = match + " && " + classiferMatchString + } + + return match +} + +func addressSetToMatchString(addrset addressset.AddressSet, dir trafficDirection, ipv4Enabled, ipv6Enabled bool) string { + ipv4AddrSetHashName, ipv6AddrSetHashName := addrset.GetASHashNames() + output := "" + switch { + case ipv4Enabled && ipv6Enabled: + output = fmt.Sprintf("(ip4.%s == {$%s} || ip6.%s == {$%s})", dir, ipv4AddrSetHashName, dir, ipv6AddrSetHashName) + case ipv4Enabled: + output = fmt.Sprintf("ip4.%s == {$%s}", dir, ipv4AddrSetHashName) + case ipv6Enabled: + output = fmt.Sprintf("ip6.%s == {$%s}", dir, ipv6AddrSetHashName) + } + return output +} + +func getNamespaceAddressSet(addressSetFactory addressset.AddressSetFactory, controllerName, namespace string) (addressset.AddressSet, error) { + dbIDs := libovsdbops.NewDbObjectIDs(libovsdbops.AddressSetNamespace, controllerName, map[libovsdbops.ExternalIDKey]string{ + libovsdbops.ObjectNameKey: namespace, + }) + return addressSetFactory.EnsureAddressSet(dbIDs) +} diff --git a/go-controller/pkg/ovn/default_network_controller.go b/go-controller/pkg/ovn/default_network_controller.go index 705a4da468..03825cee2c 100644 --- a/go-controller/pkg/ovn/default_network_controller.go +++ b/go-controller/pkg/ovn/default_network_controller.go @@ -574,6 +574,19 @@ func (oc *DefaultNetworkController) run(_ context.Context) error { } } + if config.OVNKubernetesFeature.EnableNetworkQoS { + err := oc.newNetworkQoSController() + if err != nil { + return fmt.Errorf("unable to create network qos controller, err: %w", err) + } + oc.wg.Add(1) + go func() { + defer oc.wg.Done() + // Until we have scale issues in future let's spawn only one thread + oc.nqosController.Run(1, oc.stopChan) + }() + } + end := time.Since(start) klog.Infof("Completing all the Watchers took %v", end) metrics.MetricOVNKubeControllerSyncDuration.WithLabelValues("all watchers").Set(end.Seconds()) diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index e2a4b84e35..466ba10926 100644 --- 
a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -610,6 +610,20 @@ func (oc *SecondaryLayer3NetworkController) run() error { } } + // start NetworkQoS controller if feature is enabled + if config.OVNKubernetesFeature.EnableNetworkQoS { + err := oc.newNetworkQoSController() + if err != nil { + return fmt.Errorf("unable to create network qos controller, err: %w", err) + } + oc.wg.Add(1) + go func() { + defer oc.wg.Done() + // Until we have scale issues in future let's spawn only one thread + oc.nqosController.Run(1, oc.stopChan) + }() + } + klog.Infof("Completing all the Watchers for network %s took %v", oc.GetNetworkName(), time.Since(start)) return nil diff --git a/go-controller/pkg/types/resource_status.go b/go-controller/pkg/types/resource_status.go index 2a69fd57c1..c7a2e51155 100644 --- a/go-controller/pkg/types/resource_status.go +++ b/go-controller/pkg/types/resource_status.go @@ -10,6 +10,7 @@ const ( APBRouteErrorMsg = "failed to apply policy" EgressFirewallErrorMsg = "EgressFirewall Rules not correctly applied" EgressQoSErrorMsg = "EgressQoS Rules not correctly applied" + NetworkQoSErrorMsg = "NetworkQoS Destinations not correctly applied" ) func GetZoneStatus(zoneID, message string) string { diff --git a/go-controller/pkg/util/fake_client.go b/go-controller/pkg/util/fake_client.go index 0286785d3f..51b624cac7 100644 --- a/go-controller/pkg/util/fake_client.go +++ b/go-controller/pkg/util/fake_client.go @@ -31,6 +31,8 @@ import ( egressqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake" egressservice "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" egressservicefake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake" + networkqos "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + networkqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake" routeadvertisements "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" routeadvertisementsfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/fake" udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" @@ -45,6 +47,7 @@ func GetOVNClientset(objects ...runtime.Object) *OVNClientset { egressServiceObjects := []runtime.Object{} apbExternalRouteObjects := []runtime.Object{} anpObjects := []runtime.Object{} + networkQoSObjects := []runtime.Object{} v1Objects := []runtime.Object{} nads := []runtime.Object{} cloudObjects := []runtime.Object{} @@ -80,6 +83,8 @@ func GetOVNClientset(objects ...runtime.Object) *OVNClientset { raObjects = append(raObjects, object) case *frrapi.FRRConfiguration: frrObjects = append(frrObjects, object) + case *networkqos.NetworkQoS: + networkQoSObjects = append(networkQoSObjects, object) default: v1Objects = append(v1Objects, object) } @@ -107,6 +112,7 @@ func GetOVNClientset(objects ...runtime.Object) *OVNClientset { UserDefinedNetworkClient: udnfake.NewSimpleClientset(udnObjects...), RouteAdvertisementsClient: routeadvertisementsfake.NewSimpleClientset(raObjects...), FRRClient: frrfake.NewSimpleClientset(frrObjects...), + NetworkQoSClient: networkqosfake.NewSimpleClientset(networkQoSObjects...), } } diff --git 
a/go-controller/pkg/util/kube.go b/go-controller/pkg/util/kube.go index becc64c95f..7fb84610ea 100644 --- a/go-controller/pkg/util/kube.go +++ b/go-controller/pkg/util/kube.go @@ -53,7 +53,6 @@ import ( networkqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" routeadvertisementsclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned" userdefinednetworkclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ) // OVNClientset is a wrapper around all clientsets used by OVN-Kubernetes From d70b47474e98688761a33bba6f73eb3f54c3eb60 Mon Sep 17 00:00:00 2001 From: Xiaobin Qu Date: Fri, 20 Sep 2024 12:34:55 -0700 Subject: [PATCH 07/18] add e2e tests for networkqos Signed-off-by: Xiaobin Qu (cherry picked from commit 98d4ceb470d10dadd68546ac9bd0a6dddeaa841d) --- test/e2e/networkqos.go | 770 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 770 insertions(+) create mode 100644 test/e2e/networkqos.go diff --git a/test/e2e/networkqos.go b/test/e2e/networkqos.go new file mode 100644 index 0000000000..9bacc84188 --- /dev/null +++ b/test/e2e/networkqos.go @@ -0,0 +1,770 @@ +package e2e + +import ( + "context" + "encoding/json" + "fmt" + "net" + "os" + "strconv" + "strings" + "time" + + "golang.org/x/sync/errgroup" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" +) + +var _ = ginkgo.Describe("e2e NetworkQoS validation", func() { + const ( + podImage = "ghcr.io/nicolaka/netshoot:v0.13" + networkQoSYaml = "networkqos.yaml" + nqosSpecName = "nqos-test-spec" + srcPodName = "src-nqos-pod" + tcpdumpIPv4 = "(ip and (ip[1] & 0xfc) >> 2 == %d)" + tcpdumpIPv6 = "(ip6 and (ip6[0:2] & 0xfc0) >> 6 == %d)" + dstPod1Name = "nqos-dst-pod1" + dstPod2Name = "nqos-dst-pod2" + dstPod3Name = "nqos-dst-pod3" + dstPod4Name = "nqos-dst-pod4" + + bandwidthFluctuation = 1.5 + ) + + var ( + skipIpv4 bool + skipIpv6 bool + dstPodNamespace string + dstNode string + dstPod1IPv4 string + dstPod1IPv6 string + dstPod2IPv4 string + dstPod2IPv6 string + dstPod3IPv4 string + dstPod3IPv6 string + dstPod4IPv4 string + dstPod4IPv6 string + nodeIPv4Range string + nodeIPv6Range string + ) + + f := wrappedTestFramework("networkqos") + + waitForNetworkQoSApplied := func(namespace string) { + gomega.Eventually(func() bool { + output, err := e2ekubectl.RunKubectl(namespace, "get", "networkqos", nqosSpecName) + if err != nil { + framework.Failf("could not get the networkqos %s in namespace %s", nqosSpecName, namespace) + } + return strings.Contains(output, "NetworkQoS Destinations applied") + }, 10*time.Second).Should(gomega.BeTrue(), fmt.Sprintf("expected networkqos in namespace %s to be successfully applied", namespace)) + } + + ginkgo.BeforeEach(func() { + nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.TODO(), f.ClientSet, 2) + framework.ExpectNoError(err) + if len(nodes.Items) < 2 { + framework.Failf("Test requires >= 2 Ready nodes, but there are only %v nodes", len(nodes.Items)) + } + nodeAddresses := map[string]string{} + err = 
json.Unmarshal([]byte(nodes.Items[0].Annotations["k8s.ovn.org/node-primary-ifaddr"]), &nodeAddresses) + framework.ExpectNoError(err) + if nodeIP, ok := nodeAddresses["ipv4"]; ok { + _, ipnet, _ := net.ParseCIDR(nodeIP) + nodeIPv4Range = ipnet.String() + skipIpv4 = false + } else { + ginkgo.By("Node IPv4 address not found: Will be skipping IPv4 checks in the Network QoS test") + nodeIPv4Range = "0.0.0.0/0" + skipIpv4 = true + } + if nodeIP, ok := nodeAddresses["ipv6"]; ok { + _, ipnet, _ := net.ParseCIDR(nodeIP) + nodeIPv6Range = ipnet.String() + skipIpv6 = false + } else { + ginkgo.By("Node IPv6 address not found: Will be skipping IPv6 checks in the Network QoS test") + nodeIPv6Range = "::/0" + skipIpv6 = true + } + if skipIpv4 && skipIpv6 { + framework.Fail("Neither IPv4 nor IPv6 is configured on the node") + } + dstPodNamespace = f.Namespace.Name + "-dest" + // set up dest namespace + dstNs := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: dstPodNamespace, + Labels: map[string]string{ + "app": "nqos-test", + }, + }, + } + _, err = f.ClientSet.CoreV1().Namespaces().Create(context.Background(), dstNs, metav1.CreateOptions{}) + framework.ExpectNoError(err, "Error creating Namespace %v: %v", dstPodNamespace, err) + + _, err = createPod(f, srcPodName, nodes.Items[0].Name, f.Namespace.Name, []string{"bash", "-c", "sleep infinity"}, map[string]string{"component": "nqos-test-src"}, func(p *corev1.Pod) { + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + dstNode = nodes.Items[1].Name + }) + + ginkgo.DescribeTable("Should have correct DSCP value for overlay traffic when NetworkQoS is applied", + func(skipThisTableEntry *bool, tcpDumpTpl string, dst1IP, dst2IP, dst3IP, dst4IP *string) { + if *skipThisTableEntry { + return + } + dscpValue := 50 + // dest pod without protocol and port + dstPod1, err := createPod(f, dstPod1Name, dstNode, dstPodNamespace, []string{"bash", "-c", "sleep infinity"}, map[string]string{"component": "nqos-test-dst"}, func(p *corev1.Pod) { + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + gomega.Eventually(func() error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", dstPod1Name, "--", "which", "tcpdump") + return err + + }, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred()) + dstPod1IPv4, dstPod1IPv6 = getPodAddresses(dstPod1) + + // dest pod covered by tcp without port rule + dstPod2, err := createPod(f, dstPod2Name, dstNode, dstPodNamespace, []string{"bash", "-c", "nc -l -p 9090; sleep infinity"}, map[string]string{"component": "nqos-test-tcp"}, func(p *corev1.Pod) { + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + gomega.Eventually(func() error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", dstPod1Name, "--", "which", "nc") + return err + + }, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred()) + dstPod2IPv4, dstPod2IPv6 = getPodAddresses(dstPod2) + + // dest pod covered by tcp with port rule + dstPod3, err := createPod(f, dstPod3Name, dstNode, dstPodNamespace, []string{"bash", "-c", "python3 -m http.server 80; sleep infinity"}, map[string]string{"component": "nqos-test-web"}, func(p *corev1.Pod) { + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + gomega.Eventually(func() error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", dstPod1Name, "--", "which", "python3") + return err + + }, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred()) + dstPod3IPv4, dstPod3IPv6 = 
getPodAddresses(dstPod3) + + // dest pod not covered by networkqos + dstPod4, err := createPod(f, dstPod4Name, dstNode, dstPodNamespace, []string{"bash", "-c", "sleep infinity"}, nil, func(p *corev1.Pod) { + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + gomega.Eventually(func() error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", dstPod1Name, "--", "which", "tcpdump") + return err + + }, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred()) + dstPod4IPv4, dstPod4IPv6 = getPodAddresses(dstPod4) + + // no dscp (dscp == 0) should be detected before networkqos is applied + pingExpectDscp(f, srcPodName, dstPodNamespace, dstPod1Name, *dst1IP, tcpDumpTpl, 0) + netcatExpectDscp(f, srcPodName, dstPodNamespace, dstPod2Name, *dst2IP, tcpDumpTpl, 9090, 0) + netcatExpectDscp(f, srcPodName, dstPodNamespace, dstPod3Name, *dst3IP, tcpDumpTpl, 80, 0) + pingExpectDscp(f, srcPodName, dstPodNamespace, dstPod4Name, *dst4IP, tcpDumpTpl, 0) + + // apply networkqos spec + networkQoSSpec := fmt.Sprintf(` +apiVersion: k8s.ovn.org/v1alpha1 +kind: NetworkQoS +metadata: + namespace: %s + name: %s +spec: + podSelector: + matchLabels: + component: nqos-test-src + priority: 50 + egress: + - dscp: %d + classifier: + to: + - podSelector: + matchLabels: + component: nqos-test-dst + namespaceSelector: + matchLabels: + app: nqos-test + - dscp: %d + classifier: + port: + protocol: TCP + to: + - podSelector: + matchLabels: + component: nqos-test-tcp + namespaceSelector: + matchLabels: + app: nqos-test + - dscp: %d + classifier: + port: + protocol: TCP + port: 80 + to: + - podSelector: + matchLabels: + component: nqos-test-web + namespaceSelector: + matchLabels: + app: nqos-test +`, f.Namespace.Name, nqosSpecName, dscpValue, dscpValue+1, dscpValue+2) + if err := os.WriteFile(networkQoSYaml, []byte(networkQoSSpec), 0644); err != nil { + framework.Failf("Unable to write CRD to disk: %v", err) + } + defer func() { + if err := os.Remove(networkQoSYaml); err != nil { + framework.Logf("Unable to remove the CRD file from disk: %v", err) + } + }() + e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "create", "-f", networkQoSYaml) + framework.Logf("NetworkQoS applied") + waitForNetworkQoSApplied(f.Namespace.Name) + // verify dscp + pingExpectDscp(f, srcPodName, dstPodNamespace, dstPod1Name, *dst1IP, tcpDumpTpl, dscpValue) + netcatExpectDscp(f, srcPodName, dstPodNamespace, dstPod2Name, *dst2IP, tcpDumpTpl, 9090, dscpValue+1) + netcatExpectDscp(f, srcPodName, dstPodNamespace, dstPod3Name, *dst3IP, tcpDumpTpl, 80, dscpValue+2) + pingExpectDscp(f, srcPodName, dstPodNamespace, dstPod4Name, *dst4IP, tcpDumpTpl, 0) + }, + ginkgo.Entry("ipv4", &skipIpv4, tcpdumpIPv4, &dstPod1IPv4, &dstPod2IPv4, &dstPod3IPv4, &dstPod4IPv4), + ginkgo.Entry("ipv6", &skipIpv6, tcpdumpIPv6, &dstPod1IPv6, &dstPod2IPv6, &dstPod3IPv6, &dstPod4IPv6), + ) + + ginkgo.DescribeTable("Should have correct DSCP value for host network traffic when NetworkQoS is applied", + func(skipThisTableEntry *bool, tcpDumpTpl string, dst1IP, dst2IP, dst3IP, dst4IP *string) { + if *skipThisTableEntry { + return + } + dscpValue := 32 + // dest pod to test traffic without protocol and port + dstPod1, err := createPod(f, dstPod1Name, dstNode, dstPodNamespace, []string{"bash", "-c", "sleep infinity"}, nil, func(p *corev1.Pod) { + p.Spec.HostNetwork = true + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + gomega.Eventually(func() error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", dstPod1Name, "--", 
"which", "tcpdump") + return err + + }, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred()) + dstPod1IPv4, dstPod1IPv6 = getPodAddresses(dstPod1) + + // dest pod to test traffic with tcp protocol but no port + dstPod2, err := createPod(f, dstPod2Name, dstNode, dstPodNamespace, []string{"bash", "-c", "nc -l -p 9090; sleep infinity"}, nil, func(p *corev1.Pod) { + p.Spec.HostNetwork = true + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + gomega.Eventually(func() error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", dstPod1Name, "--", "which", "nc") + return err + + }, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred()) + dstPod2IPv4, dstPod2IPv6 = getPodAddresses(dstPod2) + + // dest pod to test traffic with tcp protocol and port + dstPod3, err := createPod(f, dstPod3Name, dstNode, dstPodNamespace, []string{"bash", "-c", "python3 -m http.server 80; sleep infinity"}, nil, func(p *corev1.Pod) { + p.Spec.HostNetwork = true + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + gomega.Eventually(func() error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", dstPod1Name, "--", "which", "python3") + return err + + }, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred()) + dstPod3IPv4, dstPod3IPv6 = getPodAddresses(dstPod3) + + // dest pod not covered by networkqos + dstPod4, err := createPod(f, dstPod4Name, dstNode, dstPodNamespace, []string{"bash", "-c", "sleep infinity"}, nil, func(p *corev1.Pod) { + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + gomega.Eventually(func() error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", dstPod1Name, "--", "which", "tcpdump") + return err + + }, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred()) + dstPod4IPv4, dstPod4IPv6 = getPodAddresses(dstPod4) + + // no dscp (dscp == 0) should be deteced before networkqos is applied + pingExpectDscp(f, srcPodName, dstPodNamespace, dstPod1Name, *dst1IP, tcpDumpTpl, 0) + netcatExpectDscp(f, srcPodName, dstPodNamespace, dstPod2Name, *dst2IP, tcpDumpTpl, 9090, 0) + netcatExpectDscp(f, srcPodName, dstPodNamespace, dstPod3Name, *dst3IP, tcpDumpTpl, 80, 0) + pingExpectDscp(f, srcPodName, dstPodNamespace, dstPod4Name, *dst4IP, tcpDumpTpl, 0) + + // apply networkqos spec + networkQoSSpec := fmt.Sprintf(` +apiVersion: k8s.ovn.org/v1alpha1 +kind: NetworkQoS +metadata: + namespace: %s + name: %s +spec: + podSelector: + matchLabels: + component: nqos-test-src + priority: 51 + egress: + - dscp: %d + classifier: + to: + - ipBlock: + cidr: %s + - ipBlock: + cidr: %s + - dscp: %d + classifier: + port: + protocol: TCP + to: + - ipBlock: + cidr: %s + - ipBlock: + cidr: %s + - dscp: %d + classifier: + port: + protocol: TCP + port: 80 + to: + - ipBlock: + cidr: %s + - ipBlock: + cidr: %s +`, f.Namespace.Name, nqosSpecName, dscpValue, nodeIPv4Range, nodeIPv6Range, dscpValue+1, nodeIPv4Range, nodeIPv6Range, dscpValue+2, nodeIPv4Range, nodeIPv6Range) + if err := os.WriteFile(networkQoSYaml, []byte(networkQoSSpec), 0644); err != nil { + framework.Failf("Unable to write CRD to disk: %v", err) + } + defer func() { + if err := os.Remove(networkQoSYaml); err != nil { + framework.Logf("Unable to remove the CRD file from disk: %v", err) + } + }() + e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "create", "-f", networkQoSYaml) + framework.Logf("NetworkQoS applied") + waitForNetworkQoSApplied(f.Namespace.Name) + // verify dscp + pingExpectDscp(f, srcPodName, dstPodNamespace, dstPod1Name, 
*dst1IP, tcpDumpTpl, dscpValue) + netcatExpectDscp(f, srcPodName, dstPodNamespace, dstPod2Name, *dst2IP, tcpDumpTpl, 9090, dscpValue+1) + netcatExpectDscp(f, srcPodName, dstPodNamespace, dstPod3Name, *dst3IP, tcpDumpTpl, 80, dscpValue+2) + pingExpectDscp(f, srcPodName, dstPodNamespace, dstPod4Name, *dst4IP, tcpDumpTpl, 0) + }, + ginkgo.Entry("ipv4", &skipIpv4, tcpdumpIPv4, &dstPod1IPv4, &dstPod2IPv4, &dstPod3IPv4, &dstPod4IPv4), + ginkgo.Entry("ipv6", &skipIpv6, tcpdumpIPv6, &dstPod1IPv6, &dstPod2IPv6, &dstPod3IPv6, &dstPod4IPv6), + ) + + ginkgo.DescribeTable("Limits egress traffic to all target pods below the specified rate in NetworkQoS spec", + func(skipThisTableEntry *bool, dst1IP, dst2IP *string) { + if *skipThisTableEntry { + return + } + rate := 10000 + // dest pod 1 for test without protocol & port + dstPod1, err := createPod(f, dstPod1Name, dstNode, dstPodNamespace, []string{"bash", "-c", "iperf3 -s"}, map[string]string{"component": "nqos-test-dst"}, func(p *corev1.Pod) { + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + gomega.Eventually(func() error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", dstPod1Name, "--", "which", "iperf3") + return err + + }, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred()) + dstPod1IPv4, dstPod1IPv6 = getPodAddresses(dstPod1) + // dest pod 2 for test without protocol & port + dstPod2, err := createPod(f, dstPod2Name, dstNode, dstPodNamespace, []string{"bash", "-c", "iperf3 -s"}, map[string]string{"component": "nqos-test-dst"}, func(p *corev1.Pod) { + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + gomega.Eventually(func() error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", dstPod1Name, "--", "which", "iperf3") + return err + + }, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred()) + dstPod2IPv4, dstPod2IPv6 = getPodAddresses(dstPod2) + + bps := twoStreamIperf3Tests(f, srcPodName, *dst1IP, *dst2IP, 5201) + gomega.Expect(bps/1000 > float64(rate)*bandwidthFluctuation).To(gomega.BeTrue()) + + // apply networkqos spec + networkQoSSpec := fmt.Sprintf(` +apiVersion: k8s.ovn.org/v1alpha1 +kind: NetworkQoS +metadata: + namespace: %s + name: %s +spec: + podSelector: + matchLabels: + component: nqos-test-src + priority: 52 + egress: + - dscp: 1 + bandwidth: + rate: %d + classifier: + to: + - podSelector: + matchLabels: + component: nqos-test-dst + namespaceSelector: + matchLabels: + app: nqos-test +`, f.Namespace.Name, nqosSpecName, rate) + if err := os.WriteFile(networkQoSYaml, []byte(networkQoSSpec), 0644); err != nil { + framework.Failf("Unable to write CRD to disk: %v", err) + } + defer func() { + if err := os.Remove(networkQoSYaml); err != nil { + framework.Logf("Unable to remove the CRD file from disk: %v", err) + } + }() + e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "create", "-f", networkQoSYaml) + framework.Logf("NetworkQoS applied") + waitForNetworkQoSApplied(f.Namespace.Name) + bps = twoStreamIperf3Tests(f, srcPodName, *dst1IP, *dst2IP, 5201) + gomega.Expect(bps/1000 <= float64(rate)*bandwidthFluctuation).To(gomega.BeTrue()) + }, + ginkgo.Entry("ipv4", &skipIpv4, &dstPod1IPv4, &dstPod2IPv4), + ginkgo.Entry("ipv6", &skipIpv6, &dstPod1IPv6, &dstPod2IPv6), + ) + + ginkgo.DescribeTable("Limits egress traffic targeting an individual pod by protocol through a NetworkQoS spec", + func(skipThisTableEntry *bool, dst1IP *string) { + if *skipThisTableEntry { + return + } + rate := 5000 + // dest pod for test with protocol + dstPod1, err := 
createPod(f, dstPod1Name, dstNode, dstPodNamespace, []string{"bash", "-c", "iperf3 -s"}, map[string]string{"component": "nqos-test-tcp"}, func(p *corev1.Pod) { + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + gomega.Eventually(func() error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", dstPod1Name, "--", "which", "iperf3") + return err + + }, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred()) + dstPod1IPv4, dstPod1IPv6 = getPodAddresses(dstPod1) + bps := iperf3Test(f, srcPodName, *dst1IP, 5201) + gomega.Expect(bps/1000 > float64(rate)*bandwidthFluctuation).To(gomega.BeTrue()) + // apply networkqos spec + networkQoSSpec := fmt.Sprintf(` +apiVersion: k8s.ovn.org/v1alpha1 +kind: NetworkQoS +metadata: + namespace: %s + name: %s +spec: + podSelector: + matchLabels: + component: nqos-test-src + priority: 53 + egress: + - dscp: 2 + bandwidth: + rate: %d + classifier: + port: + protocol: TCP + + to: + - podSelector: + matchLabels: + component: nqos-test-tcp + namespaceSelector: + matchLabels: + app: nqos-test +`, f.Namespace.Name, nqosSpecName, rate) + if err := os.WriteFile(networkQoSYaml, []byte(networkQoSSpec), 0644); err != nil { + framework.Failf("Unable to write CRD to disk: %v", err) + } + defer func() { + if err := os.Remove(networkQoSYaml); err != nil { + framework.Logf("Unable to remove the CRD file from disk: %v", err) + } + }() + e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "create", "-f", networkQoSYaml) + framework.Logf("NetworkQoS applied") + waitForNetworkQoSApplied(f.Namespace.Name) + bps = iperf3Test(f, srcPodName, *dst1IP, 5201) + gomega.Expect(bps/1000 <= float64(rate)*bandwidthFluctuation).To(gomega.BeTrue()) + }, + ginkgo.Entry("ipv4", &skipIpv4, &dstPod1IPv4), + ginkgo.Entry("ipv6", &skipIpv6, &dstPod1IPv6), + ) + + ginkgo.DescribeTable("Limits egress traffic targeting a pod by protocol and port through a NetworkQoS spec", + func(skipThisTableEntry *bool, dst1IP *string) { + if *skipThisTableEntry { + return + } + rate := 5000 + // dest pod for test with protocol and port + dstPod1, err := createPod(f, dstPod1Name, dstNode, dstPodNamespace, []string{"bash", "-c", "iperf3 -s -p 80"}, map[string]string{"component": "nqos-test-proto-and-port"}, func(p *corev1.Pod) { + p.Spec.Containers[0].Image = podImage + }) + framework.ExpectNoError(err) + gomega.Eventually(func() error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", dstPod1Name, "--", "which", "iperf3") + return err + + }, 60*time.Second, 1*time.Second).ShouldNot(gomega.HaveOccurred()) + dstPod1IPv4, dstPod1IPv6 = getPodAddresses(dstPod1) + bps := iperf3Test(f, srcPodName, *dst1IP, 80) + gomega.Expect(bps/1000 > float64(rate)*bandwidthFluctuation).To(gomega.BeTrue()) + // apply networkqos spec + networkQoSSpec := fmt.Sprintf(` +apiVersion: k8s.ovn.org/v1alpha1 +kind: NetworkQoS +metadata: + namespace: %s + name: %s +spec: + podSelector: + matchLabels: + component: nqos-test-src + priority: 54 + egress: + - dscp: 3 + bandwidth: + rate: %d + classifier: + port: + protocol: TCP + port: 80 + to: + - podSelector: + matchLabels: + component: nqos-test-proto-and-port + namespaceSelector: + matchLabels: + app: nqos-test +`, f.Namespace.Name, nqosSpecName, rate) + if err := os.WriteFile(networkQoSYaml, []byte(networkQoSSpec), 0644); err != nil { + framework.Failf("Unable to write CRD to disk: %v", err) + } + defer func() { + if err := os.Remove(networkQoSYaml); err != nil { + framework.Logf("Unable to remove the CRD file from disk: %v", err) + } + }() + 
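+ // Apply the spec, wait until the NetworkQoS status reports its destinations + // applied, then re-measure: with the rate limit enforced, the observed kbps + // (bps / 1000) must stay within rate scaled by the bandwidthFluctuation headroom.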
e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "create", "-f", networkQoSYaml) + framework.Logf("NetworkQoS applied") + waitForNetworkQoSApplied(f.Namespace.Name) + bps = iperf3Test(f, srcPodName, *dst1IP, 80) + gomega.Expect(bps/1000 <= float64(rate)*bandwidthFluctuation).To(gomega.BeTrue()) + }, + ginkgo.Entry("ipv4", &skipIpv4, &dstPod1IPv4), + ginkgo.Entry("ipv6", &skipIpv6, &dstPod1IPv6), + ) + + ginkgo.AfterEach(func() { + err := f.ClientSet.CoreV1().Namespaces().Delete(context.Background(), dstPodNamespace, metav1.DeleteOptions{}) + framework.ExpectNoError(err, "Error deleting Namespace %v: %v", dstPodNamespace, err) + }) +}) + +func pingExpectDscp(f *framework.Framework, srcPod, dstPodNamespace, dstPod, dstPodIP, tcpDumpTpl string, dscp int) { + tcpDumpSync := errgroup.Group{} + pingSync := errgroup.Group{} + + checkDSCPOnPod := func(pod string, dscp int) error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", pod, "--", "timeout", "10", + "tcpdump", "-i", "any", "-c", "1", "-v", fmt.Sprintf(tcpDumpTpl, dscp)) + return err + } + + pingFromSrcPod := func(pod, dst string) error { + _, err := e2ekubectl.RunKubectl(f.Namespace.Name, "exec", pod, "--", "ping", "-c", "5", dst) + return err + } + + tcpDumpSync.Go(func() error { + return checkDSCPOnPod(dstPod, dscp) + }) + pingSync.Go(func() error { + return pingFromSrcPod(srcPod, dstPodIP) + }) + err := pingSync.Wait() + framework.ExpectNoError(err, "Failed to ping dst pod") + err = tcpDumpSync.Wait() + framework.ExpectNoError(err, "Failed to detect ping with correct DSCP on pod") +} + +func netcatExpectDscp(f *framework.Framework, srcPod, dstPodNamespace, dstPod, dstPodIP, tcpDumpTpl string, port, dscp int) { + tcpDumpSync := errgroup.Group{} + netcatSync := errgroup.Group{} + + checkDSCPOnPod := func(pod string, dscp int) error { + _, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", pod, "--", "timeout", "10", + "tcpdump", "-i", "any", "-c", "1", "-v", fmt.Sprintf(tcpDumpTpl, dscp)) + return err + } + + netcatFromSrcPod := func(pod, dst string) error { + _, err := e2ekubectl.RunKubectl(f.Namespace.Name, "exec", pod, "--", "bash", "-c", fmt.Sprintf("for i in {1..5}; do nc -vz -w 1 %s %d; sleep 1; done", dst, port)) + return err + } + + tcpDumpSync.Go(func() error { + return checkDSCPOnPod(dstPod, dscp) + }) + netcatSync.Go(func() error { + return netcatFromSrcPod(srcPod, dstPodIP) + }) + err := netcatSync.Wait() + framework.ExpectNoError(err, "Failed to connect to dst pod") + err = tcpDumpSync.Wait() + framework.ExpectNoError(err, "Failed to detect packets with correct DSCP on pod") +} + +func iperf3Test(f *framework.Framework, srcPod, dstIP string, port int, protocol ...string) float64 { + iperf3Sync := errgroup.Group{} + + iperfTest := func(pod, destIP string, port int, bps *float64) error { + args := []string{"exec", pod, "--", "iperf3", "-c", destIP, "-p", strconv.Itoa(port), "-J"} + if len(protocol) > 0 && protocol[0] == "udp" { + args = append(args, "-u", "-b", "0") + } + output, err := e2ekubectl.RunKubectl(f.Namespace.Name, args...) 
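+ // `iperf3 -J` prints a single JSON document; the aggregate throughput lives under + // end.sum_sent when present (TCP runs) or end.sum otherwise (typically UDP runs), + // which is why the parsing below probes both keys.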
+ if err != nil { + return err + } + var data map[string]interface{} + err = json.Unmarshal([]byte(output), &data) + if err != nil { + return err + } + end := data["end"].(map[string]interface{}) + if sum_sent, ok := end["sum_sent"]; ok { + *bps = sum_sent.(map[string]interface{})["bits_per_second"].(float64) + } else if sum, ok := end["sum"]; ok { + *bps = sum.(map[string]interface{})["bits_per_second"].(float64) + } + return nil + } + bps := 0.0 + iperf3Sync.Go(func() error { + return iperfTest(srcPod, dstIP, port, &bps) + }) + err := iperf3Sync.Wait() + framework.ExpectNoError(err, fmt.Sprintf("Failed to run iperf3 test for IP %s", dstIP)) + return bps +} + +func twoStreamIperf3Tests(f *framework.Framework, srcPod, dstPod1IP, dstPod2IP string, port int) float64 { + iperf3Sync1 := errgroup.Group{} + iperf3Sync2 := errgroup.Group{} + + iperfTest := func(pod, destIP string, port int, bps *float64) error { + output, err := e2ekubectl.RunKubectl(f.Namespace.Name, "exec", pod, "--", "iperf3", "-c", destIP, "-p", strconv.Itoa(port), "-J") + if err != nil { + return err + } + var data map[string]interface{} + err = json.Unmarshal([]byte(output), &data) + if err != nil { + return err + } + end := data["end"].(map[string]interface{}) + sum_sent := end["sum_sent"].(map[string]interface{}) + *bps = sum_sent["bits_per_second"].(float64) + return nil + } + + bps1 := 0.0 + bps2 := 0.0 + + iperf3Sync1.Go(func() error { + return iperfTest(srcPod, dstPod1IP, port, &bps1) + }) + iperf3Sync2.Go(func() error { + return iperfTest(srcPod, dstPod2IP, port, &bps2) + }) + err := iperf3Sync1.Wait() + framework.ExpectNoError(err, fmt.Sprintf("Failed to run iperf3 test for IP %s", dstPod1IP)) + err = iperf3Sync2.Wait() + framework.ExpectNoError(err, fmt.Sprintf("Failed to run iperf3 test for IP %s", dstPod2IP)) + return bps1 + bps2 +} + +func pingExpectNoDscp(f *framework.Framework, srcPod, dstPodNamespace, dstPod, dstPodIP, tcpDumpTpl string, dscp int) { + tcpDumpSync := errgroup.Group{} + pingSync := errgroup.Group{} + + checkDSCPOnPod := func(pod string, dscp int) error { + output, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", pod, "--", "timeout", "10", + "tcpdump", "-i", "any", "-c", "1", "-v", fmt.Sprintf(tcpDumpTpl, dscp)) + if err != nil { + return err + } + if len(strings.TrimSpace(output)) == 0 { + return fmt.Errorf("no packets captured") + } + return nil + } + + pingFromSrcPod := func(pod, dst string) error { + _, err := e2ekubectl.RunKubectl(f.Namespace.Name, "exec", pod, "--", "ping", "-c", "5", dst) + return err + } + + tcpDumpSync.Go(func() error { + return checkDSCPOnPod(dstPod, dscp) + }) + pingSync.Go(func() error { + return pingFromSrcPod(srcPod, dstPodIP) + }) + err := pingSync.Wait() + gomega.Expect(err).To(gomega.BeNil()) + err = tcpDumpSync.Wait() + gomega.Expect(err).To(gomega.HaveOccurred()) +} + +func netcatExpectNoDscp(f *framework.Framework, srcPod, dstPodNamespace, dstPod, dstPodIP, tcpDumpTpl string, port, dscp int) { + tcpDumpSync := errgroup.Group{} + netcatSync := errgroup.Group{} + + checkDSCPOnPod := func(pod string, dscp int) error { + output, err := e2ekubectl.RunKubectl(dstPodNamespace, "exec", pod, "--", "timeout", "10", + "tcpdump", "-i", "any", "-c", "1", "-v", fmt.Sprintf(tcpDumpTpl, dscp)) + if err != nil { + return err + } + if len(strings.TrimSpace(output)) == 0 { + return fmt.Errorf("no packets captured") + } + return nil + } + + netcatFromSrcPod := func(pod, dst string) error { + _, err := e2ekubectl.RunKubectl(f.Namespace.Name, "exec", pod, "--", "bash", "-c", 
fmt.Sprintf("for i in {1..5}; do nc -vz -w 1 %s %d; sleep 1; done", dst, port)) + return err + } + + tcpDumpSync.Go(func() error { + return checkDSCPOnPod(dstPod, dscp) + }) + netcatSync.Go(func() error { + return netcatFromSrcPod(srcPod, dstPodIP) + }) + err := netcatSync.Wait() + framework.ExpectNoError(err, "Failed to connect to dst pod") + err = tcpDumpSync.Wait() + gomega.Expect(err).To(gomega.HaveOccurred()) +} From 16ce81e580f1102a3b7f7264f1bec1b5a044929a Mon Sep 17 00:00:00 2001 From: Flavio Fernandes Date: Mon, 30 Sep 2024 15:13:20 +0000 Subject: [PATCH 08/18] gh, actions: Set OVN_NETWORK_QOS_ENABLE in control-plane jobs e2e skip test: Network QoS feature depends on multi-homing to exercise secondary NAD Signed-off-by: Flavio Fernandes (cherry picked from commit 606206f4a6c2ee644569411347fb463c6da11698) --- .github/workflows/test.yml | 5 ++++- test/scripts/e2e-cp.sh | 7 +++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d76045fe8e..2da2a873ae 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -489,6 +489,7 @@ jobs: OVN_DISABLE_FORWARDING: "${{ matrix.forwarding == 'disable-forwarding' }}" USE_HELM: "${{ matrix.target == 'control-plane-helm' || matrix.target == 'multi-homing-helm' }}" OVN_ENABLE_DNSNAMERESOLVER: "${{ matrix.dns-name-resolver == 'enable-dns-name-resolver' }}" + OVN_NETWORK_QOS_ENABLE: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' }}" TRAFFIC_FLOW_TESTS: "${{ matrix.traffic-flow-tests }}" ENABLE_ROUTE_ADVERTISEMENTS: "${{ matrix.routeadvertisements != '' }}" ADVERTISE_DEFAULT_NETWORK: "${{ matrix.routeadvertisements == 'advertise-default' }}" @@ -629,7 +630,9 @@ jobs: make -C test control-plane WHAT="Kubevirt Virtual Machines" elif [ "${{ matrix.target }}" == "control-plane-helm" ]; then make -C test control-plane - make -C test conformance + if [ "${{ matrix.ipfamily }}" != "ipv6" ]; then + make -C test conformance + fi elif [ "${{ matrix.target }}" == "network-segmentation" ]; then make -C test control-plane WHAT="Network Segmentation" elif [ "${{ matrix.target }}" == "bgp" ]; then diff --git a/test/scripts/e2e-cp.sh b/test/scripts/e2e-cp.sh index d7dc3b5806..da692db253 100755 --- a/test/scripts/e2e-cp.sh +++ b/test/scripts/e2e-cp.sh @@ -115,6 +115,13 @@ if [ "$ENABLE_MULTI_NET" != "true" ]; then SKIPPED_TESTS+="Multi Homing" fi +if [ "$OVN_NETWORK_QOS_ENABLE" != "true" ]; then + if [ "$SKIPPED_TESTS" != "" ]; then + SKIPPED_TESTS+="|" + fi + SKIPPED_TESTS+="e2e NetworkQoS validation" +fi + # Only run Node IP/MAC address migration tests if they are explicitly requested IP_MIGRATION_TESTS="Node IP and MAC address migration" if [[ "${WHAT}" != "${IP_MIGRATION_TESTS}"* ]]; then From f500120818cd3d71f71413d555690335e39f5944 Mon Sep 17 00:00:00 2001 From: Xiaobin Qu Date: Fri, 28 Feb 2025 22:20:57 -0800 Subject: [PATCH 09/18] address review comments. misc changes to address review comments. 
Signed-off-by: Xiaobin Qu Signed-off-by: Flavio Fernandes (cherry picked from commit 9770cb18ff459c8905f65db151976318a4eb0154) --- .../pkg/ovn/controller/network_qos/metrics.go | 16 ++++++ .../ovn/controller/network_qos/network_qos.go | 56 ++++++++++--------- .../network_qos/network_qos_controller.go | 3 - .../controller/network_qos/network_qos_pod.go | 6 +- 4 files changed, 48 insertions(+), 33 deletions(-) diff --git a/go-controller/pkg/ovn/controller/network_qos/metrics.go b/go-controller/pkg/ovn/controller/network_qos/metrics.go index 05aada1eb8..bd04523190 100644 --- a/go-controller/pkg/ovn/controller/network_qos/metrics.go +++ b/go-controller/pkg/ovn/controller/network_qos/metrics.go @@ -60,6 +60,16 @@ var ( }, []string{"network"}, ) + + nqosStatusPatchDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: metrics.MetricOvnkubeNamespace, + Subsystem: metrics.MetricOvnkubeSubsystemController, + Name: "nqos_status_patch_duration_ms", + Help: "Time spent on patching the status of a NetworkQoS", + }, + []string{"network"}, + ) ) func init() { @@ -69,6 +79,7 @@ func init() { nqosReconcileDuration, nqosPodReconcileDuration, nqosNamespaceReconcileDuration, + nqosStatusPatchDuration, ) } @@ -100,3 +111,8 @@ func recordNamespaceReconcileDuration(network string, duration int64) { func recordOvnOperationDuration(operationType string, duration int64) { nqosOvnOperationDuration.WithLabelValues(operationType).Observe(float64(duration)) } + +// records time spent on patching the status of a NetworkQoS +func recordStatusPatchDuration(network string, duration int64) { + nqosStatusPatchDuration.WithLabelValues(network).Observe(float64(duration)) +} diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos.go b/go-controller/pkg/ovn/controller/network_qos/network_qos.go index 4c542b5fe6..d82b24944a 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos.go @@ -36,7 +36,7 @@ func (c *Controller) processNextNQOSWorkItem(wg *sync.WaitGroup) bool { c.nqosQueue.Forget(nqosKey) return true } - utilruntime.HandleError(fmt.Errorf("%v failed with: %v", nqosKey, err)) + utilruntime.HandleError(fmt.Errorf("%s: failed to handle key %s, error: %v", c.controllerName, nqosKey, err)) if c.nqosQueue.NumRequeues(nqosKey) < maxRetries { c.nqosQueue.AddRateLimited(nqosKey) @@ -66,27 +66,27 @@ func (c *Controller) syncNetworkQoS(key string) error { return err } if nqos == nil { - klog.V(5).Infof("%s - NetworkQoS %s has gone", c.controllerName, key) + klog.V(6).Infof("%s - NetworkQoS %s has gone", c.controllerName, key) return c.nqosCache.DoWithLock(key, func(nqosKey string) error { return c.clearNetworkQos(nqosNamespace, nqosName) }) - } else { - if !c.networkManagedByMe(nqos.Spec.NetworkAttachmentRefs) { - // maybe NetworkAttachmentName has been changed from this one to other value, try cleanup anyway - return c.nqosCache.DoWithLock(key, func(nqosKey string) error { - return c.clearNetworkQos(nqosNamespace, nqosName) - }) - } } + if !c.networkManagedByMe(nqos.Spec.NetworkAttachmentRefs) { + // maybe NetworkAttachmentName has been changed from this one to other value, try cleanup anyway + return c.nqosCache.DoWithLock(key, func(nqosKey string) error { + return c.clearNetworkQos(nqosNamespace, nqosName) + }) + } + klog.V(5).Infof("%s - Processing NetworkQoS %s/%s", c.controllerName, nqos.Namespace, nqos.Name) - // save key to avoid racing - c.nqosCache.Store(key, nil) // at this stage the NQOS exists in the 
cluster return c.nqosCache.DoWithLock(key, func(nqosKey string) error { + // save key to avoid racing + c.nqosCache.Store(key, nil) if err = c.ensureNetworkQos(nqos); err != nil { c.nqosCache.Delete(key) // we can ignore the error if status update doesn't succeed; best effort - c.updateNQOSStatusToNotReady(nqos.Namespace, nqos.Name, "failed to enforce", err) + c.updateNQOSStatusToNotReady(nqos.Namespace, nqos.Name, "failed to reconcile", err) return err } recordNetworkQoSReconcileDuration(c.controllerName, time.Since(startTime).Milliseconds()) @@ -98,6 +98,7 @@ // ensureNetworkQos will handle the main reconcile logic for any given nqos's // add/update that might be triggered either due to NQOS changes or the corresponding // matching pod or namespace changes. +// This function needs to be called with a lock held. func (c *Controller) ensureNetworkQos(nqos *networkqosapi.NetworkQoS) error { desiredNQOSState := &networkQoSState{ name: nqos.Name, @@ -131,23 +132,20 @@ func (c *Controller) ensureNetworkQos(nqos *networkqosapi.NetworkQoS) error { destStates := []*Destination{} for _, destSpec := range ruleSpec.Classifier.To { if destSpec.IPBlock != nil && (destSpec.PodSelector != nil || destSpec.NamespaceSelector != nil) { - c.updateNQOSStatusToNotReady(nqos.Namespace, nqos.Name, "specifying both ipBlock and podSelector/namespaceSelector is not allowed", nil) - return nil + return fmt.Errorf("specifying both ipBlock and podSelector/namespaceSelector is not allowed") } destState := &Destination{} destState.IpBlock = destSpec.IPBlock.DeepCopy() if destSpec.NamespaceSelector != nil && (len(destSpec.NamespaceSelector.MatchLabels) > 0 || len(destSpec.NamespaceSelector.MatchExpressions) > 0) { if selector, err := metav1.LabelSelectorAsSelector(destSpec.NamespaceSelector); err != nil { - c.updateNQOSStatusToNotReady(nqos.Namespace, nqos.Name, "failed to parse destination namespace selector", err) - return nil + return fmt.Errorf("error parsing destination namespace selector: %v", err) } else { destState.NamespaceSelector = selector } } if destSpec.PodSelector != nil && (len(destSpec.PodSelector.MatchLabels) > 0 || len(destSpec.PodSelector.MatchExpressions) > 0) { if selector, err := metav1.LabelSelectorAsSelector(destSpec.PodSelector); err != nil { - c.updateNQOSStatusToNotReady(nqos.Namespace, nqos.Name, "failed to parse destination pod selector", err) - return nil + return fmt.Errorf("error parsing destination pod selector: %v", err) } else { destState.PodSelector = selector } @@ -160,7 +158,7 @@ if ruleSpec.Classifier.Port.Protocol != "" { ruleState.Classifier.Protocol = protocol(ruleSpec.Classifier.Port.Protocol) if !ruleState.Classifier.Protocol.IsValid() { - return fmt.Errorf("invalid protocol: %s, valid values are: tcp, udp, sctp", ruleSpec.Classifier.Port.Protocol) + return fmt.Errorf("invalid protocol: %s, valid values are: TCP, UDP, SCTP", ruleSpec.Classifier.Port.Protocol) } } if ruleSpec.Classifier.Port.Port > 0 { @@ -182,14 +180,15 @@ } c.nqosCache.Store(joinMetaNamespaceAndName(nqos.Namespace, nqos.Name), desiredNQOSState) if e := c.updateNQOSStatusToReady(nqos.Namespace, nqos.Name); e != nil { - return fmt.Errorf("NetworkQoS %s/%s reconciled successfully but unable to patch status: %v", nqos.Namespace, nqos.Name, e) + return fmt.Errorf("successfully reconciled NetworkQoS %s/%s, 
but failed to patch status: %v", nqos.Namespace, nqos.Name, e) } return nil } // clearNetworkQos will handle the logic for deleting all db objects related -// to the provided nqos which got deleted. -// uses externalIDs to figure out ownership +// to the provided nqos which got deleted. it looks up object in OVN by comparing +// the nqos name with the metadata in externalIDs. +// this function need to be called with a lock held. func (c *Controller) clearNetworkQos(nqosNamespace, nqosName string) error { k8sFullName := joinMetaNamespaceAndName(nqosNamespace, nqosName) ovnObjectName := joinMetaNamespaceAndName(nqosNamespace, nqosName, ":") @@ -217,12 +216,14 @@ func (c *Controller) updateNQOSStatusToReady(namespace, name string) error { Reason: reasonQoSSetupSuccess, Message: "NetworkQoS was applied successfully", } + startTime := time.Now() err := c.updateNQOStatusCondition(cond, namespace, name) if err != nil { return fmt.Errorf("failed to update the status of NetworkQoS %s/%s, err: %v", namespace, name, err) } - klog.V(5).Infof("Patched the status of NetworkQoS %s/%s with condition type %v/%v", - namespace, name, conditionTypeReady+c.zone, metav1.ConditionTrue) + klog.V(5).Infof("%s: successfully patched the status of NetworkQoS %s/%s with condition type %v/%v in %v seconds", + c.controllerName, namespace, name, conditionTypeReady+c.zone, metav1.ConditionTrue, time.Since(startTime).Seconds()) + recordStatusPatchDuration(c.controllerName, time.Since(startTime).Milliseconds()) return nil } @@ -238,11 +239,13 @@ func (c *Controller) updateNQOSStatusToNotReady(namespace, name, reason string, Message: msg, } klog.Error(msg) + startTime := time.Now() err = c.updateNQOStatusCondition(cond, namespace, name) if err != nil { - klog.Warningf("Failed to update the status of NetworkQoS %s/%s, err: %v", namespace, name, err) + klog.Warningf("%s: failed to update the status of NetworkQoS %s/%s, err: %v", c.controllerName, namespace, name, err) } else { - klog.V(6).Infof("Patched the status of NetworkQoS %s/%s with condition type %v/%v", namespace, name, conditionTypeReady+c.zone, metav1.ConditionTrue) + klog.V(6).Infof("%s: successfully patched status of NetworkQoS %s/%s with condition type %v/%v in %v seconds", c.controllerName, namespace, name, conditionTypeReady+c.zone, metav1.ConditionTrue, time.Since(startTime).Seconds()) + recordStatusPatchDuration(c.controllerName, time.Since(startTime).Milliseconds()) } } @@ -308,7 +311,6 @@ func (c *Controller) networkManagedByMe(nadRefs []corev1.ObjectReference) bool { (!c.IsDefault() && c.HasNAD(nadKey)) { return true } - klog.V(6).Infof("Net-attach-def %s is not managed by controller %s ", nadKey, c.controllerName) } return false } diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go index 61f928b3d0..6a0bcee130 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go @@ -290,7 +290,6 @@ func (c *Controller) onNQOSAdd(obj interface{}) { utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) return } - klog.V(4).Infof("Adding Network QoS %s", key) c.nqosQueue.Add(key) } @@ -323,7 +322,6 @@ func (c *Controller) onNQOSDelete(obj interface{}) { utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) return } - klog.V(4).Infof("Deleting Network QoS %s", key) c.nqosQueue.Add(key) } @@ -334,7 +332,6 @@ 
func (c *Controller) onNQOSNamespaceAdd(obj interface{}) { utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) return } - klog.V(5).Infof("Adding Namespace in Network QoS controller %s", key) c.nqosNamespaceQueue.Add(key) } diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go index 88ac75c4dd..0c901abd99 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go @@ -85,7 +85,7 @@ func (c *Controller) syncNetworkQoSPod(key string) error { recordPodReconcileDuration(c.controllerName, time.Since(startTime).Milliseconds()) return nil } - // We don't want to shortcuit only local zone pods here since peer pods + // We don't want to shortcut only local zone pods here since peer pods // whether local or remote need to be dealt with. So we let the main // NQOS controller take care of the local zone pods logic for the policy subjects if !util.PodScheduled(pod) || util.PodWantsHostNetwork(pod) { @@ -139,13 +139,13 @@ } -// setPodForNQOS wil lcheck if the pod meets source selector or dest selector +// setPodForNQOS will check if the pod meets source selector or dest selector // - match source: add the ip to source address set, bind qos rule to the switch // - match dest: add the ip to the destination address set func (c *Controller) setPodForNQOS(pod *v1.Pod, nqosState *networkQoSState, namespace *v1.Namespace) error { addresses, err := getPodAddresses(pod, c.NetInfo) if err == nil && len(addresses) == 0 { - // pod hasn't been annotated with addresses yet, return without retry + // pod either is not attached to this network, or hasn't been annotated with addresses yet, return without retry klog.V(6).Infof("Pod %s/%s doesn't have addresses on network %s, skip NetworkQoS processing", pod.Namespace, pod.Name, c.GetNetworkName()) return nil } else if err != nil { From e43fa27ceadffda4d5af80b956c1cc81e3a04333 Mon Sep 17 00:00:00 2001 From: Flavio Fernandes Date: Fri, 11 Apr 2025 22:37:04 +0000 Subject: [PATCH 10/18] Update networkqos okep to reflect latest changes from code review Signed-off-by: Flavio Fernandes (cherry picked from commit c561de8528c78520117a8952262896b79799b1b1) --- docs/okeps/okep-4380-network-qos.md | 250 +++++++++++++++------------- 1 file changed, 130 insertions(+), 120 deletions(-) diff --git a/docs/okeps/okep-4380-network-qos.md b/docs/okeps/okep-4380-network-qos.md index 0f5384808a..d792bbd4ce 100644 --- a/docs/okeps/okep-4380-network-qos.md +++ b/docs/okeps/okep-4380-network-qos.md @@ -54,7 +54,7 @@ Another strategy for providing differential treatment to workload network traffic is marking packets using DSCP (a 6-bit field in the IP header). These marked packets can then be handled differently by in-zone and in-cluster services. OVN supports this packet marking capability through OVS, allowing traffic to be classified based on specific match criteria. OVN marks the inner -packet’s IP header. So, the marking appears inside the GENEVE tunnel. There are ways to transfer +packet's IP header. So, the marking appears inside the GENEVE tunnel. There are ways to transfer
@@ -156,9 +156,10 @@ whether NetworkQoS rules are configured correctly in OVN or not. ```go import ( -corev1 "k8s.io/api/core/v1" -networkingv1 "k8s.io/api/networking/v1" -metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + crdtypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" ) // +genclient @@ -175,141 +176,141 @@ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // is marked with relevant DSCP value and enforcing specified policing // parameters. type NetworkQoS struct { -metav1.TypeMeta `json:",inline"` -metav1.ObjectMeta `json:"metadata,omitempty"` + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` -Spec Spec `json:"spec,omitempty"` -Status Status `json:"status,omitempty"` + Spec Spec `json:"spec,omitempty"` + Status Status `json:"status,omitempty"` } // Spec defines the desired state of NetworkQoS type Spec struct { -// netAttachRefs points to a list of objects which could be either NAD, UDN, or Cluster UDN. -// In the case of NAD, the network type could be of type Layer-3, Layer-2, or Localnet. -// If not specified, then the primary network of the selected Pods will be chosen. -// +optional -// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="netAttachRefs is immutable" -NetworkAttachmentRefs []corev1.ObjectReference `json:"netAttachRefs,omitempty"` - -// podSelector applies the NetworkQoS rule only to the pods in the namespace whose label -// matches this definition. This field is optional, and in case it is not set -// results in the rule being applied to all pods in the namespace. -// +optional -PodSelector metav1.LabelSelector `json:"podSelector,omitempty"` - -// priority is a value from 0 to 100 and represents the NetworkQoS' priority. -// QoSes with numerically higher priority takes precedence over those with lower. -// +kubebuilder:validation:Maximum:=100 -// +kubebuilder:validation:Minimum:=0 -Priority int `json:"priority"` - -// egress a collection of Egress NetworkQoS rule objects. A total of 20 rules will -// be allowed in each NetworkQoS instance. The relative precedence of egress rules -// within a single NetworkQos object (all of which share the priority) will be -// determined by the order in which the rule is written. Thus, a rule that appears -// first in the list of egress rules would take the lower precedence. -Egress []Rule `json:"egress"` + // networkSelector selects the networks on which the pod IPs need to be added to the source address set. + // NetworkQoS controller currently supports `NetworkAttachmentDefinitions` type only. + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="networkSelector is immutable" + NetworkSelectors crdtypes.NetworkSelectors `json:"networkSelectors,omitempty"` + + // podSelector applies the NetworkQoS rule only to the pods in the namespace whose label + // matches this definition. This field is optional, and in case it is not set + // results in the rule being applied to all pods in the namespace. + // +optional + PodSelector metav1.LabelSelector `json:"podSelector,omitempty"` + + // priority is a value from 0 to 100 and represents the NetworkQoS' priority. + // QoSes with numerically higher priority takes precedence over those with lower. + // +kubebuilder:validation:Maximum:=100 + // +kubebuilder:validation:Minimum:=0 + Priority int `json:"priority"` + + // egress a collection of Egress NetworkQoS rule objects. 
A total of 20 rules will + // be allowed in each NetworkQoS instance. The relative precedence of egress rules + // within a single NetworkQos object (all of which share the priority) will be + // determined by the order in which the rule is written. Thus, a rule that appears + // first in the list of egress rules would take the lower precedence. + // +kubebuilder:validation:MaxItems=20 + Egress []Rule `json:"egress"` } type Rule struct { -// dscp marking value for matching pods' traffic. -// +kubebuilder:validation:Maximum:=63 -// +kubebuilder:validation:Minimum:=0 -DSCP int `json:"dscp"` - -// classifier The classifier on which packets should match -// to apply the NetworkQoS Rule. -// This field is optional, and in case it is not set the rule is applied -// to all egress traffic regardless of the destination. -// +optional -Classifier Classifier `json:"classifier"` - -// +optional -Bandwidth Bandwidth `json:"bandwidth"` + // dscp marking value for matching pods' traffic. + // +kubebuilder:validation:Maximum:=63 + // +kubebuilder:validation:Minimum:=0 + DSCP int `json:"dscp"` + + // classifier The classifier on which packets should match + // to apply the NetworkQoS Rule. + // This field is optional, and in case it is not set the rule is applied + // to all egress traffic regardless of the destination. + // +optional + Classifier Classifier `json:"classifier"` + + // +optional + Bandwidth Bandwidth `json:"bandwidth"` } type Classifier struct { -// +optional -To []Destination `json:"to"` + // +optional + To []Destination `json:"to"` -// +optional -Port Port `json:"port"` + // +optional + Ports []*Port `json:"ports"` } // Bandwidth controls the maximum of rate traffic that can be sent // or received on the matching packets. type Bandwidth struct { -// rate The value of rate limit in kbps. Traffic over the limit -// will be dropped. -// +kubebuilder:validation:Minimum:=1 -// +kubebuilder:validation:Maximum:=4294967295 -// +optional -Rate uint32 `json:"rate"` - -// burst The value of burst rate limit in kilobits. -// This also needs rate to be specified. -// +kubebuilder:validation:Minimum:=1 -// +kubebuilder:validation:Maximum:=4294967295 -// +optional -Burst uint32 `json:"burst"` + // rate The value of rate limit in kbps. Traffic over the limit + // will be dropped. + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=4294967295 + // +optional + Rate uint32 `json:"rate"` + + // burst The value of burst rate limit in kilobits. + // This also needs rate to be specified. + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=4294967295 + // +optional + Burst uint32 `json:"burst"` } // Port specifies destination protocol and port on which NetworkQoS // rule is applied type Port struct { -// protocol (tcp, udp, sctp) that the traffic must match. -// +kubebuilder:validation:Pattern=^TCP|UDP|SCTP$ -// +optional -Protocol string `json:"protocol"` - -// port that the traffic must match -// +kubebuilder:validation:Minimum:=1 -// +kubebuilder:validation:Maximum:=65535 -// +optional -Port int32 `json:"port"` + // protocol (tcp, udp, sctp) that the traffic must match. + // +kubebuilder:validation:Pattern=^TCP|UDP|SCTP$ + // +optional + Protocol string `json:"protocol"` + + // port that the traffic must match + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=65535 + // +optional + Port *int32 `json:"port"` } // Destination describes a peer to apply NetworkQoS configuration for the outgoing traffic. 
// Only certain combinations of fields are allowed. // +kubebuilder:validation:XValidation:rule="!(has(self.ipBlock) && (has(self.podSelector) || has(self.namespaceSelector)))",message="Can't specify both podSelector/namespaceSelector and ipBlock" type Destination struct { -// podSelector is a label selector which selects pods. This field follows standard label -// selector semantics; if present but empty, it selects all pods. -// -// If namespaceSelector is also set, then the NetworkQoS as a whole selects -// the pods matching podSelector in the Namespaces selected by NamespaceSelector. -// Otherwise it selects the pods matching podSelector in the NetworkQoS's own namespace. -// +optional -PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` - -// namespaceSelector selects namespaces using cluster-scoped labels. This field follows -// standard label selector semantics; if present but empty, it selects all namespaces. -// -// If podSelector is also set, then the NetworkQoS as a whole selects -// the pods matching podSelector in the namespaces selected by namespaceSelector. -// Otherwise it selects all pods in the namespaces selected by namespaceSelector. -// +optional -NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` - -// ipBlock defines policy on a particular IPBlock. If this field is set then -// neither of the other fields can be. -// +optional -IPBlock *networkingv1.IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` + // podSelector is a label selector which selects pods. This field follows standard label + // selector semantics; if present but empty, it selects all pods. + // + // If namespaceSelector is also set, then the NetworkQoS as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the NetworkQoS's own namespace. + // +optional + PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` + + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. + // + // If podSelector is also set, then the NetworkQoS as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` + + // ipBlock defines policy on a particular IPBlock. If this field is set then + // neither of the other fields can be. + // +optional + IPBlock *networkingv1.IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` } // Status defines the observed state of NetworkQoS type Status struct { -// A concise indication of whether the NetworkQoS resource is applied with success. -// +optional -Status string `json:"status,omitempty"` - -// An array of condition objects indicating details about status of NetworkQoS object. -// +optional -// +patchMergeKey=type -// +patchStrategy=merge -// +listType=map -// +listMapKey=type -Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // A concise indication of whether the NetworkQoS resource is applied with success. 
+ // +optional + Status string `json:"status,omitempty"` + + // An array of condition objects indicating details about the status of the NetworkQoS object. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -317,9 +318,9 @@ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" // +kubebuilder::singular=networkqos // NetworkQoSList contains a list of NetworkQoS type NetworkQoSList struct { -metav1.TypeMeta `json:",inline"` -metav1.ListMeta `json:"metadata,omitempty"` -Items []NetworkQoS `json:"items"` + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NetworkQoS `json:"items"` } ``` @@ -380,12 +381,12 @@ spec: - dscp: 11 classifier: to: - - ipBlock: - cidr: 0.0.0.0/0 - except: - - 10.0.0.0/8 - - 172.16.0.0/12 - - 192.168.0.0/16 + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 ``` the equivalent of: @@ -456,10 +457,14 @@ metadata: name: qos-external-free namespace: games spec: - netAttachRefs: - - kind: NetworkAttachmentDefinition - namespace: default - name: ovn-storage + networkSelectors: + - networkSelectionType: NetworkAttachmentDefinitions + networkAttachmentDefinitionSelector: + namespaceSelector: + matchLabels: {} # Empty selector will select all namespaces + networkSelector: + matchLabels: + name: ovn-storage priority: 2 egress: - dscp: 11 @@ -467,6 +472,11 @@ spec: to: - ipBlock: cidr: 0.0.0.0/0 + ports: + - protocol: TCP + port: 80 + - protocol: TCP + port: 443 ``` This creates a new AddressSet containing the IP(s) of the default namespace pod(s) associated with ovn-storage From f54a58c918152d6967a365ae873075cf7385e65b Mon Sep 17 00:00:00 2001 From: jxiaobin Date: Tue, 11 Mar 2025 13:10:49 -0700 Subject: [PATCH 11/18] networkqos: use NetworkSelector to match net-attach-defs (#8) use NetworkSelector spec to match net-attach-defs instead of explicitly specifying a list of net-attach-defs. 
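For illustration, a selector-based spec (mirroring the test fixtures updated in this patch) can be built as below; the name=stream label is an example value, not a convention required by the API:

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1"
	crdtypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types"
)

// spec selects every net-attach-def labeled name=stream instead of
// referencing a single NAD by namespace/name.
var spec = networkqosapi.Spec{
	NetworkSelectors: []crdtypes.NetworkSelector{
		{
			NetworkSelectionType: crdtypes.NetworkAttachmentDefinitions,
			NetworkAttachmentDefinitionSelector: &crdtypes.NetworkAttachmentDefinitionSelector{
				NetworkSelector: metav1.LabelSelector{
					MatchLabels: map[string]string{"name": "stream"},
				},
			},
		},
	},
	Priority: 100,
}

func main() { _ = spec }
```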
Signed-off-by: Xiaobin Qu Signed-off-by: Flavio Fernandes (cherry picked from commit a373249a5770ae37acf19b9382b8e2b0c1123b5f) --- .../status_manager/status_manager_test.go | 14 ++- .../pkg/crd/networkqos/v1alpha1/types.go | 18 ++-- go-controller/pkg/factory/factory.go | 10 +- go-controller/pkg/factory/factory_test.go | 16 +++- go-controller/pkg/factory/handler.go | 2 +- go-controller/pkg/libovsdb/ops/qos.go | 2 +- .../pkg/ovn/base_network_controller.go | 8 +- .../ovn/controller/network_qos/network_qos.go | 95 +++++++++++++++++-- .../network_qos/network_qos_controller.go | 26 ++++- .../network_qos/network_qos_test.go | 60 ++++++++---- 10 files changed, 191 insertions(+), 60 deletions(-) diff --git a/go-controller/pkg/clustermanager/status_manager/status_manager_test.go b/go-controller/pkg/clustermanager/status_manager/status_manager_test.go index fae419976a..e660bae402 100644 --- a/go-controller/pkg/clustermanager/status_manager/status_manager_test.go +++ b/go-controller/pkg/clustermanager/status_manager/status_manager_test.go @@ -22,6 +22,7 @@ import ( egressfirewallapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" egressqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + crdtypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -209,11 +210,16 @@ func newNetworkQoS(namespace string) *networkqosapi.NetworkQoS { return &networkqosapi.NetworkQoS{ ObjectMeta: util.NewObjectMeta("default", namespace), Spec: networkqosapi.Spec{ - NetworkAttachmentRefs: []v1.ObjectReference{ + NetworkSelectors: []crdtypes.NetworkSelector{ { - Kind: "NetworkAttachmentDefinition", - Namespace: "default", - Name: "stream", + NetworkSelectionType: crdtypes.NetworkAttachmentDefinitions, + NetworkAttachmentDefinitionSelector: &crdtypes.NetworkAttachmentDefinitionSelector{ + NetworkSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": "stream", + }, + }, + }, }, }, Priority: 100, diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/types.go b/go-controller/pkg/crd/networkqos/v1alpha1/types.go index 0a9997e45e..53ee00a712 100644 --- a/go-controller/pkg/crd/networkqos/v1alpha1/types.go +++ b/go-controller/pkg/crd/networkqos/v1alpha1/types.go @@ -17,9 +17,10 @@ limitations under the License. package v1alpha1 import ( - corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + crdtypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" ) // +genclient @@ -45,12 +46,12 @@ type NetworkQoS struct { // Spec defines the desired state of NetworkQoS type Spec struct { - // netAttachRefs points to a list of objects which could be either NAD, UDN, or Cluster UDN. - // In the case of NAD, the network type could be of type Layer-3, Layer-2, or Localnet. - // If not specified, then the primary network of the selected Pods will be chosen. + // networkSelector selects the networks on which the pod IPs need to be added to the source address set. + // NetworkQoS controller currently supports `NetworkAttachmentDefinitions` type only. 
// +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="netAttachRefs is immutable" - NetworkAttachmentRefs []corev1.ObjectReference `json:"netAttachRefs,omitempty"` + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="networkSelector is immutable" + // +kubebuilder:validation:XValidation:rule="self.all(sel, sel.networkSelectionType == 'ClusterUserDefinedNetworks' || sel.networkSelectionType == 'NetworkAttachmentDefinitions')", message="Unsupported network selection type" + NetworkSelectors crdtypes.NetworkSelectors `json:"networkSelectors,omitempty"` // podSelector applies the NetworkQoS rule only to the pods in the namespace whose label // matches this definition. This field is optional, and in case it is not set @@ -69,6 +70,7 @@ type Spec struct { // within a single NetworkQos object (all of which share the priority) will be // determined by the order in which the rule is written. Thus, a rule that appears // first in the list of egress rules would take the lower precedence. + // +kubebuilder:validation:MaxItems=20 Egress []Rule `json:"egress"` } @@ -94,7 +96,7 @@ type Classifier struct { To []Destination `json:"to"` // +optional - Port Port `json:"port"` + Ports []*Port `json:"ports"` } // Bandwidth controls the maximum of rate traffic that can be sent @@ -127,7 +129,7 @@ type Port struct { // +kubebuilder:validation:Minimum:=1 // +kubebuilder:validation:Maximum:=65535 // +optional - Port int32 `json:"port"` + Port *int32 `json:"port"` } // Destination describes a peer to apply NetworkQoS configuration for the outgoing traffic. diff --git a/go-controller/pkg/factory/factory.go b/go-controller/pkg/factory/factory.go index ee8733ba13..b3277ea9cf 100644 --- a/go-controller/pkg/factory/factory.go +++ b/go-controller/pkg/factory/factory.go @@ -81,11 +81,11 @@ import ( egressservicescheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/scheme" egressserviceinformerfactory "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions" egressserviceinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/egressservice/v1" - networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1" + networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" networkqosscheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/scheme" networkqosinformerfactory "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions" - networkqosinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1" - networkqoslister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1" + networkqosinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1" + networkqoslister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1" routeadvertisementsapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1" routeadvertisementsscheme 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/clientset/versioned/scheme" routeadvertisementsinformerfactory "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1/apis/informers/externalversions" @@ -516,7 +516,7 @@ func NewOVNKubeControllerWatchFactory(ovnClientset *util.OVNKubeControllerClient if config.OVNKubernetesFeature.EnableNetworkQoS { wf.informers[NetworkQoSType], err = newQueuedInformer(eventQueueSize, NetworkQoSType, - wf.networkQoSFactory.K8s().V1().NetworkQoSes().Informer(), wf.stopChan, minNumEventQueues) + wf.networkQoSFactory.K8s().V1alpha1().NetworkQoSes().Informer(), wf.stopChan, minNumEventQueues) if err != nil { return nil, err } @@ -1820,7 +1820,7 @@ func (wf *WatchFactory) FRRConfigurationsInformer() frrinformer.FRRConfiguration } func (wf *WatchFactory) NetworkQoSInformer() networkqosinformer.NetworkQoSInformer { - return wf.networkQoSFactory.K8s().V1().NetworkQoSes() + return wf.networkQoSFactory.K8s().V1alpha1().NetworkQoSes() } // withServiceNameAndNoHeadlessServiceSelector returns a LabelSelector (added to the diff --git a/go-controller/pkg/factory/factory_test.go b/go-controller/pkg/factory/factory_test.go index af0e8ddb76..462833479a 100644 --- a/go-controller/pkg/factory/factory_test.go +++ b/go-controller/pkg/factory/factory_test.go @@ -38,9 +38,10 @@ import ( egressqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake" egressservice "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" egressservicefake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake" + crdtypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - networkqos "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1" + networkqos "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" networkqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake" . 
"github.com/onsi/ginkgo/v2" @@ -230,11 +231,16 @@ func newNetworkQoS(name, namespace string) *networkqos.NetworkQoS { return &networkqos.NetworkQoS{ ObjectMeta: newObjectMeta(name, namespace), Spec: networkqos.Spec{ - NetworkAttachmentRefs: []v1.ObjectReference{ + NetworkSelectors: []crdtypes.NetworkSelector{ { - Kind: "NetworkAttachmentDefinition", - Namespace: "default", - Name: "stream", + NetworkSelectionType: crdtypes.NetworkAttachmentDefinitions, + NetworkAttachmentDefinitionSelector: &crdtypes.NetworkAttachmentDefinitionSelector{ + NetworkSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": "stream", + }, + }, + }, }, }, Priority: 100, diff --git a/go-controller/pkg/factory/handler.go b/go-controller/pkg/factory/handler.go index dac86905d5..1e87f7309b 100644 --- a/go-controller/pkg/factory/handler.go +++ b/go-controller/pkg/factory/handler.go @@ -26,7 +26,7 @@ import ( egressiplister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/listers/egressip/v1" egressqoslister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1" egressservicelister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/listers/egressservice/v1" - networkqoslister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1" + networkqoslister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1" userdefinednetworklister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" ) diff --git a/go-controller/pkg/libovsdb/ops/qos.go b/go-controller/pkg/libovsdb/ops/qos.go index d991c4c007..21d6a2f7f8 100644 --- a/go-controller/pkg/libovsdb/ops/qos.go +++ b/go-controller/pkg/libovsdb/ops/qos.go @@ -125,7 +125,7 @@ func RemoveQoSesFromLogicalSwitchOps(nbClient libovsdbclient.Client, ops []ovsdb } // DeleteQoSesWithPredicateOps returns the ops to delete QoSes based on a given predicate -func DeleteQoSesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, p QoSPredicate) ([]libovsdb.Operation, error) { +func DeleteQoSesWithPredicateOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, p QoSPredicate) ([]ovsdb.Operation, error) { deleted := []*nbdb.QoS{} opModel := operationModel{ ModelPredicate: p, diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go index 6494fe932f..793a78ee1e 100644 --- a/go-controller/pkg/ovn/base_network_controller.go +++ b/go-controller/pkg/ovn/base_network_controller.go @@ -21,9 +21,9 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" + nadinformerv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1" libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/ovsdb" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" @@ -1062,6 +1062,11 @@ func (bnc *BaseNetworkController) DeleteResourceCommon(objType reflect.Type, obj func (bnc *BaseNetworkController) newNetworkQoSController() error { 
var err error + var nadInformer nadinformerv1.NetworkAttachmentDefinitionInformer + + if config.OVNKubernetesFeature.EnableMultiNetwork { + nadInformer = bnc.watchFactory.NADInformer() + } bnc.nqosController, err = nqoscontroller.NewController( bnc.controllerName, bnc.ReconcilableNetInfo.GetNetInfo(), @@ -1072,6 +1077,7 @@ func (bnc *BaseNetworkController) newNetworkQoSController() error { bnc.watchFactory.NamespaceCoreInformer(), bnc.watchFactory.PodCoreInformer(), bnc.watchFactory.NodeCoreInformer(), + nadInformer, bnc.addressSetFactory, bnc.isPodScheduledinLocalZone, bnc.zone, diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos.go b/go-controller/pkg/ovn/controller/network_qos/network_qos.go index d82b24944a..818b45604c 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos.go @@ -17,8 +17,11 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/ptr" + nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" nqosapiapply "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1" + crdtypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" + udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ) @@ -66,12 +69,15 @@ func (c *Controller) syncNetworkQoS(key string) error { return err } if nqos == nil { - klog.V(6).Infof("%s - NetworkQoS %s has gone", c.controllerName, key) + klog.V(6).Infof("%s - NetworkQoS %s no longer exists.", c.controllerName, key) return c.nqosCache.DoWithLock(key, func(nqosKey string) error { return c.clearNetworkQos(nqosNamespace, nqosName) }) } - if !c.networkManagedByMe(nqos.Spec.NetworkAttachmentRefs) { + + if networkManagedByMe, err := c.networkManagedByMe(nqos.Spec.NetworkSelectors); err != nil { + return err + } else if !networkManagedByMe { // maybe NetworkAttachmentName has been changed from this one to other value, try cleanup anyway return c.nqosCache.DoWithLock(key, func(nqosKey string) error { return c.clearNetworkQos(nqosNamespace, nqosName) @@ -301,18 +307,87 @@ func (c *Controller) resyncPods(nqosState *networkQoSState) error { return nil } -func (c *Controller) networkManagedByMe(nadRefs []corev1.ObjectReference) bool { - if len(nadRefs) == 0 { - return c.IsDefault() +var cudnController = udnv1.SchemeGroupVersion.WithKind("ClusterUserDefinedNetwork") + +func (c *Controller) networkManagedByMe(networkSelectors crdtypes.NetworkSelectors) (bool, error) { + // return c.IsDefault() if multi-network is disabled or no selectors is provided in spec + if c.nadLister == nil || len(networkSelectors) == 0 { + return c.IsDefault(), nil + } + var selectedNads []*nadv1.NetworkAttachmentDefinition + for _, networkSelector := range networkSelectors { + switch networkSelector.NetworkSelectionType { + case crdtypes.DefaultNetwork: + return c.IsDefault(), nil + case crdtypes.ClusterUserDefinedNetworks: + nadSelector, err := metav1.LabelSelectorAsSelector(&networkSelector.ClusterUserDefinedNetworkSelector.NetworkSelector) + if err != nil { + return false, err + } + nads, err := c.nadLister.List(nadSelector) + if err != nil { + return false, err + } + for _, nad := range nads { + // check this NAD is controlled by a 
CUDN + controller := metav1.GetControllerOfNoCopy(nad) + isCUDN := controller != nil && controller.Kind == cudnController.Kind && controller.APIVersion == cudnController.GroupVersion().String() + if !isCUDN { + continue + } + selectedNads = append(selectedNads, nad) + } + case crdtypes.NetworkAttachmentDefinitions: + if networkSelector.NetworkAttachmentDefinitionSelector == nil { + return false, fmt.Errorf("empty network attachment definition selector") + } + nadSelector, err := metav1.LabelSelectorAsSelector(&networkSelector.NetworkAttachmentDefinitionSelector.NetworkSelector) + if err != nil { + return false, err + } + if nadSelector.Empty() { + return false, fmt.Errorf("empty network selector") + } + nsSelector, err := metav1.LabelSelectorAsSelector(&networkSelector.NetworkAttachmentDefinitionSelector.NamespaceSelector) + if err != nil { + return false, err + } + + if nsSelector.Empty() { + // if namespace selector is empty, list NADs in all namespaces + nads, err := c.nadLister.List(nadSelector) + if err != nil { + return false, err + } + selectedNads = append(selectedNads, nads...) + } else { + namespaces, err := c.nqosNamespaceLister.List(nsSelector) + if err != nil { + return false, err + } + for _, ns := range namespaces { + nads, err := c.nadLister.NetworkAttachmentDefinitions(ns.Name).List(nadSelector) + if err != nil { + return false, err + } + selectedNads = append(selectedNads, nads...) + } + } + default: + return false, fmt.Errorf("unsupported network selection type %s", networkSelector.NetworkSelectionType) + } + } + if len(selectedNads) == 0 { + return false, nil } - for _, nadRef := range nadRefs { - nadKey := joinMetaNamespaceAndName(nadRef.Namespace, nadRef.Name) - if ((nadKey == "" || nadKey == types.DefaultNetworkName) && c.IsDefault()) || + for _, nad := range selectedNads { + nadKey := joinMetaNamespaceAndName(nad.Namespace, nad.Name) + if ((nadKey == types.DefaultNetworkName) && c.IsDefault()) || (!c.IsDefault() && c.HasNAD(nadKey)) { - return true + return true, nil } } - return false + return false, nil } func (c *Controller) getLogicalSwitchName(nodeName string) string { diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go index 6a0bcee130..4dd89aae60 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go @@ -19,6 +19,8 @@ import ( libovsdbclient "github.com/ovn-org/libovsdb/client" + nadinformerv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1" + nadlisterv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" networkqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" networkqosinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1" @@ -84,6 +86,10 @@ type Controller struct { nqosNodeLister corev1listers.NodeLister nqosNodeSynced cache.InformerSynced nqosNodeQueue workqueue.TypedRateLimitingInterface[string] + + // nad lister, only valid for default network controller when multi-network is enabled + nadLister 
nadlisterv1.NetworkAttachmentDefinitionLister + nadSynced cache.InformerSynced } // NewController returns a new *Controller. @@ -97,6 +103,7 @@ func NewController( namespaceInformer corev1informers.NamespaceInformer, podInformer corev1informers.PodInformer, nodeInformer corev1informers.NodeInformer, + nadInformer nadinformerv1.NetworkAttachmentDefinitionInformer, addressSetFactory addressset.AddressSetFactory, isPodScheduledinLocalZone func(*v1.Pod) bool, zone string) (*Controller, error) { @@ -163,7 +170,7 @@ func NewController( klog.V(5).Info("Setting up event handlers for Nodes in Network QoS controller") c.nqosNodeLister = nodeInformer.Lister() - c.nqosNodeSynced = podInformer.Informer().HasSynced + c.nqosNodeSynced = nodeInformer.Informer().HasSynced c.nqosNodeQueue = workqueue.NewTypedRateLimitingQueueWithConfig( workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), workqueue.TypedRateLimitingQueueConfig[string]{Name: "nqosNodes"}, @@ -175,6 +182,11 @@ func NewController( return nil, fmt.Errorf("could not add Event Handler for node Informer during network qos controller initialization, %w", err) } + if nadInformer != nil { + c.nadLister = nadInformer.Lister() + c.nadSynced = nadInformer.Informer().HasSynced + } + c.eventRecorder = recorder return c, nil } @@ -187,12 +199,18 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) { klog.Infof("Starting controller %s", c.controllerName) // Wait for the caches to be synced - klog.V(5).Info("Waiting for informer caches to sync") + klog.V(5).Info("Waiting for informer caches (networkqos,namespace,pod,node) to sync") if !util.WaitForInformerCacheSyncWithTimeout(c.controllerName, stopCh, c.nqosCacheSynced, c.nqosNamespaceSynced, c.nqosPodSynced, c.nqosNodeSynced) { - utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) - klog.Errorf("Error syncing caches for network qos") + utilruntime.HandleError(fmt.Errorf("timed out waiting for informer caches (networkqos,namespace,pod,node) to sync")) return } + if c.nadSynced != nil { + klog.V(5).Info("Waiting for net-attach-def informer cache to sync") + if !util.WaitForInformerCacheSyncWithTimeout(c.controllerName, stopCh, c.nadSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for net-attach-def informer cache to sync")) + return + } + } klog.Infof("Repairing Network QoSes") // Run the repair function at startup so that we synchronize KAPI and OVNDBs diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go index 00cb68aab2..85bbd65b93 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go @@ -15,14 +15,14 @@ import ( corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/kubernetes" libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" nqostype "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" - fakenqosclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake" + networkqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" + crdtypes 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" @@ -38,7 +38,7 @@ func init() { config.IPv4Mode = true config.IPv6Mode = false config.OVNKubernetesFeature.EnableNetworkQoS = true - config.OVNKubernetesFeature.EnableMultiNetwork = false + config.OVNKubernetesFeature.EnableMultiNetwork = true config.OVNKubernetesFeature.EnableInterconnect = false // set via tableEntrySetup } @@ -49,8 +49,8 @@ var ( stopChan chan (struct{}) nbClient libovsdbclient.Client nbsbCleanup *libovsdbtest.Context - fakeKubeClient *fake.Clientset - fakeNQoSClient *fakenqosclient.Clientset + fakeKubeClient kubernetes.Interface + fakeNQoSClient networkqosclientset.Interface wg sync.WaitGroup defaultAddrsetFactory addressset.AddressSetFactory streamAddrsetFactory addressset.AddressSetFactory @@ -216,6 +216,11 @@ func tableEntrySetup(enableInterconnect bool) { }, } + nad := ovnk8stesting.GenerateNAD("stream", "stream", "default", types.Layer3Topology, "10.128.2.0/16/24", types.NetworkRoleSecondary) + nad.Labels = map[string]string{ + "name": "stream", + } + initialDB := &libovsdbtest.TestSetup{ NBData: []libovsdbtest.TestData{ &nbdb.LogicalSwitch{ @@ -230,11 +235,13 @@ func tableEntrySetup(enableInterconnect bool) { }, } - initEnv([]runtime.Object{ns0, ns1, ns3, node1, node2, clientPod}, []runtime.Object{nqos}, initialDB) + ovnClientset := util.GetOVNClientset(ns0, ns1, ns3, node1, node2, clientPod, nqos, nad) + fakeKubeClient = ovnClientset.KubeClient + fakeNQoSClient = ovnClientset.NetworkQoSClient + initEnv(ovnClientset, initialDB) // init controller for default network initNetworkQoSController(&util.DefaultNetInfo{}, defaultAddrsetFactory, defaultControllerName) // init controller for stream nad - nad := ovnk8stesting.GenerateNAD("stream", "stream", "default", types.Layer3Topology, "10.128.2.0/16/24", types.NetworkRoleSecondary) streamImmutableNadInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) streamNadInfo := util.NewMutableNetInfo(streamImmutableNadInfo) @@ -474,11 +481,16 @@ var _ = Describe("NetworkQoS Controller", func() { Name: "stream-qos", }, Spec: nqostype.Spec{ - NetworkAttachmentRefs: []corev1.ObjectReference{ + NetworkSelectors: []crdtypes.NetworkSelector{ { - Kind: "NetworkAttachmentDefinition", - Namespace: "default", - Name: "unknown", + NetworkSelectionType: crdtypes.NetworkAttachmentDefinitions, + NetworkAttachmentDefinitionSelector: &crdtypes.NetworkAttachmentDefinitionSelector{ + NetworkSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": "unknown", + }, + }, + }, }, }, Priority: 100, @@ -521,11 +533,16 @@ var _ = Describe("NetworkQoS Controller", func() { By("handles NetworkQos on secondary network") { - nqos4StreamNet.Spec.NetworkAttachmentRefs = []corev1.ObjectReference{ + nqos4StreamNet.Spec.NetworkSelectors = []crdtypes.NetworkSelector{ { - Kind: "NetworkAttachmentDefinition", - Namespace: "default", - Name: "stream", + NetworkSelectionType: crdtypes.NetworkAttachmentDefinitions, + NetworkAttachmentDefinitionSelector: &crdtypes.NetworkAttachmentDefinitionSelector{ + NetworkSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": "stream", + }, + }, + }, }, } nqos4StreamNet.ResourceVersion = time.Now().String() @@ -812,16 +829,16 @@ func 
eventuallySwitchHasNoQoS(switchName string, qos *nbdb.QoS) { }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("Unexpected QoS rule %s found in switch %s", qos.UUID, switchName)) } -func initEnv(k8sObjects []runtime.Object, nqosObjects []runtime.Object, initialDB *libovsdbtest.TestSetup) { +func initEnv(clientset *util.OVNClientset, initialDB *libovsdbtest.TestSetup) { var nbZoneFailed bool var err error stopChan = make(chan struct{}) - fakeKubeClient = fake.NewSimpleClientset(k8sObjects...) - fakeNQoSClient = fakenqosclient.NewSimpleClientset(nqosObjects...) + watchFactory, err = factory.NewMasterWatchFactory( &util.OVNMasterClientset{ - KubeClient: fakeKubeClient, - NetworkQoSClient: fakeNQoSClient, + KubeClient: clientset.KubeClient, + NetworkQoSClient: clientset.NetworkQoSClient, + NetworkAttchDefClient: clientset.NetworkAttchDefClient, }, ) Expect(err).NotTo(HaveOccurred()) @@ -858,6 +875,7 @@ func initNetworkQoSController(netInfo util.NetInfo, addrsetFactory addressset.Ad watchFactory.NamespaceCoreInformer(), watchFactory.PodCoreInformer(), watchFactory.NodeCoreInformer(), + watchFactory.NADInformer(), addrsetFactory, func(pod *corev1.Pod) bool { return pod.Spec.NodeName == "node1" From ea5da66b374037d6d0b938626b9204cc6282d471 Mon Sep 17 00:00:00 2001 From: Flavio Fernandes Date: Wed, 12 Mar 2025 21:39:13 +0000 Subject: [PATCH 12/18] fix lint MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit pkg/ovn/controller/network_qos/network_qos_pod.go:9:2: import "k8s.io/api/core/v1" imported as "v1" but must be "corev1" according to config (importas) v1 "k8s.io/api/core/v1" ^ pkg/ovn/controller/network_qos/network_qos_test.go:547:26: Error return value of addrset.AddAddresses is not checked (errcheck) addrset.AddAddresses([]string{"10.194.188.4"}) ^ pkg/factory/factory_test.go:41: File is not gci-ed with --skip-generated -s standard -s default -s prefix(k8s.io,sigs.k8s.io) -s prefix(github.com/ovn-org) -s localmodule -s dot --custom-order (gci) "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" pkg/factory/factory_test.go:44: File is not gci-ed with --skip-generated -s standard -s default -s prefix(k8s.io,sigs.k8s.io) -s prefix(github.com/ovn-org) -s localmodule -s dot --custom-order (gci) networkqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1/apis/clientset/versioned/fake" pkg/ovn/controller/network_qos/network_qos_test.go:12: File is not gci-ed with --skip-generated -s standard -s default -s prefix(k8s.io,sigs.k8s.io) -s prefix(github.com/ovn-org) -s localmodule -s dot --custom-order (gci) . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" pkg/factory/factory_test.go:2160:21: unused-parameter: parameter 'old' seems to be unused, consider removing or renaming it as _ (revive) UpdateFunc: func(old, new interface{}) { ^ pkg/ovn/controller/network_qos/network_qos.go:70:43: unused-parameter: parameter 'nqosKey' seems to be unused, consider removing or renaming it as _ (revive) return c.nqosCache.DoWithLock(key, func(nqosKey string) error { ^ pkg/ovn/controller/network_qos/network_qos.go:84:43: unused-parameter: parameter 'nqosKey' seems to be unused, consider removing or renaming it as _ (revive) return c.nqosCache.DoWithLock(key, func(nqosKey string) error { ^ pkg/ovn/controller/network_qos/network_qos.go:91:42: unused-parameter: parameter 'nqosKey' seems to be unused, consider removing or renaming it as _ (revive) return c.nqosCache.DoWithLock(key, func(nqosKey string) error { ^ pkg/ovn/controller/network_qos/types.go:352:28: unused-parameter: parameter 'value' seems to be unused, consider removing or renaming it as _ (revive) dest.Pods.Range(func(key, value any) bool { ^ pkg/ovn/controller/network_qos/network_qos_test.go:906:12: unused-parameter: parameter 'nbGlobal' seems to be unused, consider removing or renaming it as _ (revive) p := func(nbGlobal *nbdb.NBGlobal) bool { ^ pkg/ovn/controller/network_qos/network_qos_controller.go:9:2: import "k8s.io/api/core/v1" imported as "v1" but must be "corev1" according to config (importas) v1 "k8s.io/api/core/v1" ^ level=info msg="[runner/max_same_issues] 14/17 issues with text "File is not gci-ed with --skip-generated -s standard -s default -s prefix(k8s.io,sigs.k8s.io) -s prefix(github.com/ovn-org) -s localmodule -s dot --custom-order" were hidden, use --max-same-issues" level=info msg="[runner/max_same_issues] 2/5 issues with text "import \"k8s.io/api/core/v1\" imported as \"v1\" but must be \"corev1\" according to config" were hidden, use --max-same-issues" level=info msg="[runner] Issues before processing: 154, after processing: 15" level=info msg="[runner] Processors filtering stat (out/in): invalid_issue: 154/154, path_prettifier: 154/154, max_same_issues: 15/31, severity-rules: 15/15, path_prefixer: 15/15, filename_unadjuster: 154/154, identifier_marker: 104/104, source_code: 15/15, cgo: 154/154, exclude: 104/104, exclude-rules: 34/104, diff: 31/31, max_from_linter: 15/15, fixer: 15/15, sort_results: 15/15, skip_files: 154/154, autogenerated_exclude: 104/113, nolint: 31/34, uniq_by_line: 31/31, max_per_file_from_linter: 31/31, path_shortener: 15/15, skip_dirs: 113/154" level=info msg="[runner] processing took 20.696604ms with stages: nolint: 9.359103ms, path_prettifier: 4.425669ms, autogenerated_exclude: 3.521318ms, source_code: 1.252048ms, exclude-rules: 986.811µs, identifier_marker: 704.888µs, skip_dirs: 394.602µs, max_same_issues: 15.034µs, cgo: 9.933µs, invalid_issue: 7.606µs, uniq_by_ line: 6.214µs, path_shortener: 4.436µs, filename_unadjuster: 3.748µs, max_from_linter: 3.014µs, max_per_file_from_linter: 672ns, fixer: 346ns, skip_files: 311ns, diff: 231ns, exclude: 218ns, sort_results: 204ns, severity-rules: 104ns, path_prefixer: 94ns" level=info msg="[runner] linters took 16.440293927s with stages: goanalysis_metalinter: 16.419554906s" level=info msg="File cache stats: 8 entries of total size 176.9KiB" level=info msg="Memory: 431 samples, avg is 1072.1MB, max is 4950.4MB" level=info msg="Execution took 43.614943666s" pkg/ovn/controller/network_qos/network_qos_namespace.go:8:2: import "k8s.io/api/core/v1" imported as "v1" but 
must be "corev1" according to config (importas) v1 "k8s.io/api/core/v1" ^ pkg/ovn/controller/network_qos/network_qos_node.go:10:2: import "k8s.io/api/core/v1" imported as "v1" but must be "corev1" according to config (importas) v1 "k8s.io/api/core/v1" ^ pkg/ovn/controller/network_qos/network_qos_ovnnb.go:10:2: import "github.com/ovn-org/libovsdb/ovsdb" imported as "libovsdb" but must be "" according to config (importas) libovsdb "github.com/ovn-org/libovsdb/ovsdb" ^ pkg/ovn/controller/network_qos/types.go:13:2: import "k8s.io/apimachinery/pkg/api/errors" imported without alias but must be with alias "apierrors" according to config (importas) "k8s.io/apimachinery/pkg/api/errors" ^ Signed-off-by: Flavio Fernandes (cherry picked from commit 4f2f5a176b9444eacf9508abbbb3f149ae824bec) --- .../status_manager/networkqos_manager.go | 4 ++-- .../status_manager/status_manager_test.go | 2 +- go-controller/pkg/factory/factory_test.go | 7 +++--- .../pkg/ovn/base_network_controller.go | 4 +++- .../pkg/ovn/controller/network_qos/metrics.go | 3 ++- .../ovn/controller/network_qos/network_qos.go | 9 ++++---- .../network_qos/network_qos_controller.go | 23 ++++++++++--------- .../network_qos/network_qos_namespace.go | 4 ++-- .../network_qos/network_qos_node.go | 13 ++++++----- .../network_qos/network_qos_ovnnb.go | 12 +++++----- .../controller/network_qos/network_qos_pod.go | 6 ++--- .../network_qos/network_qos_test.go | 11 +++++---- .../pkg/ovn/controller/network_qos/types.go | 16 ++++++------- 13 files changed, 60 insertions(+), 54 deletions(-) diff --git a/go-controller/pkg/clustermanager/status_manager/networkqos_manager.go b/go-controller/pkg/clustermanager/status_manager/networkqos_manager.go index ef63dc0288..5be1390505 100644 --- a/go-controller/pkg/clustermanager/status_manager/networkqos_manager.go +++ b/go-controller/pkg/clustermanager/status_manager/networkqos_manager.go @@ -4,13 +4,13 @@ import ( "context" "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" networkqosapply "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1" networkqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" networkqoslisters "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/listers/networkqos/v1alpha1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type networkQoSManager struct { diff --git a/go-controller/pkg/clustermanager/status_manager/status_manager_test.go b/go-controller/pkg/clustermanager/status_manager/status_manager_test.go index e660bae402..6621ac20f4 100644 --- a/go-controller/pkg/clustermanager/status_manager/status_manager_test.go +++ b/go-controller/pkg/clustermanager/status_manager/status_manager_test.go @@ -7,6 +7,7 @@ import ( "sync/atomic" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -29,7 +30,6 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - networkingv1 "k8s.io/api/networking/v1" ) func getNodeWithZone(nodeName, zoneName string) *corev1.Node { diff --git a/go-controller/pkg/factory/factory_test.go b/go-controller/pkg/factory/factory_test.go index 462833479a..a2bcf974c3 100644 --- a/go-controller/pkg/factory/factory_test.go +++ b/go-controller/pkg/factory/factory_test.go @@ -38,11 +38,10 @@ import ( egressqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake" egressservice "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" egressservicefake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/clientset/versioned/fake" - crdtypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - networkqos "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" networkqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned/fake" + crdtypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -2165,7 +2164,7 @@ var _ = Describe("Watch Factory Operations", func() { networkQoS := obj.(*networkqos.NetworkQoS) Expect(reflect.DeepEqual(networkQoS, added)).To(BeTrue()) }, - UpdateFunc: func(old, new interface{}) { + UpdateFunc: func(_, new interface{}) { newNetworkQoS := new.(*networkqos.NetworkQoS) Expect(reflect.DeepEqual(newNetworkQoS, added)).To(BeTrue()) Expect(newNetworkQoS.Spec.Egress[0].DSCP).To(Equal(42)) diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go index 793a78ee1e..4a04ab4352 100644 --- a/go-controller/pkg/ovn/base_network_controller.go +++ b/go-controller/pkg/ovn/base_network_controller.go @@ -8,6 +8,8 @@ import ( "sync" "time" + nadinformerv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1" + corev1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -21,9 +23,9 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - nadinformerv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1" libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/ovn/controller/network_qos/metrics.go b/go-controller/pkg/ovn/controller/network_qos/metrics.go index bd04523190..96fa30834d 100644 --- a/go-controller/pkg/ovn/controller/network_qos/metrics.go +++ b/go-controller/pkg/ovn/controller/network_qos/metrics.go @@ -1,8 +1,9 @@ package networkqos import ( - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/prometheus/client_golang/prometheus" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" ) // Metrics to be exposed diff --git 
a/go-controller/pkg/ovn/controller/network_qos/network_qos.go b/go-controller/pkg/ovn/controller/network_qos/network_qos.go index 818b45604c..6a6c642e8d 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos.go @@ -6,6 +6,8 @@ import ( "sync" "time" + nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -17,7 +19,6 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/ptr" - nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" nqosapiapply "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1" crdtypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" @@ -70,7 +71,7 @@ func (c *Controller) syncNetworkQoS(key string) error { } if nqos == nil { klog.V(6).Infof("%s - NetworkQoS %s no longer exists.", c.controllerName, key) - return c.nqosCache.DoWithLock(key, func(nqosKey string) error { + return c.nqosCache.DoWithLock(key, func(_ string) error { return c.clearNetworkQos(nqosNamespace, nqosName) }) } @@ -79,14 +80,14 @@ func (c *Controller) syncNetworkQoS(key string) error { return err } else if !networkManagedByMe { // maybe NetworkAttachmentName has been changed from this one to other value, try cleanup anyway - return c.nqosCache.DoWithLock(key, func(nqosKey string) error { + return c.nqosCache.DoWithLock(key, func(_ string) error { return c.clearNetworkQos(nqosNamespace, nqosName) }) } klog.V(5).Infof("%s - Processing NetworkQoS %s/%s", c.controllerName, nqos.Namespace, nqos.Name) // at this stage the NQOS exists in the cluster - return c.nqosCache.DoWithLock(key, func(nqosKey string) error { + return c.nqosCache.DoWithLock(key, func(_ string) error { // save key to avoid racing c.nqosCache.Store(key, nil) if err = c.ensureNetworkQos(nqos); err != nil { diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go index 4dd89aae60..e997271412 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go @@ -6,7 +6,10 @@ import ( "sync" "time" - v1 "k8s.io/api/core/v1" + nadinformerv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1" + nadlisterv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" + + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -19,8 +22,6 @@ import ( libovsdbclient "github.com/ovn-org/libovsdb/client" - nadinformerv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1" - nadlisterv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" 
networkqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" networkqosinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/informers/externalversions/networkqos/v1alpha1" @@ -61,7 +62,7 @@ type Controller struct { // we consider it remote - this is ok for this controller as this variable is only used to // determine if we need to add pod's port to port group or not - future updates should // take care of reconciling the state of the cluster - isPodScheduledinLocalZone func(*v1.Pod) bool + isPodScheduledinLocalZone func(*corev1.Pod) bool // store's the name of the zone that this controller belongs to zone string @@ -105,7 +106,7 @@ func NewController( nodeInformer corev1informers.NodeInformer, nadInformer nadinformerv1.NetworkAttachmentDefinitionInformer, addressSetFactory addressset.AddressSetFactory, - isPodScheduledinLocalZone func(*v1.Pod) bool, + isPodScheduledinLocalZone func(*corev1.Pod) bool, zone string) (*Controller, error) { c := &Controller{ @@ -355,8 +356,8 @@ func (c *Controller) onNQOSNamespaceAdd(obj interface{}) { // onNQOSNamespaceUpdate queues the namespace for processing. func (c *Controller) onNQOSNamespaceUpdate(oldObj, newObj interface{}) { - oldNamespace := oldObj.(*v1.Namespace) - newNamespace := newObj.(*v1.Namespace) + oldNamespace := oldObj.(*corev1.Namespace) + newNamespace := newObj.(*corev1.Namespace) // don't process resync or objects that are marked for deletion if oldNamespace.ResourceVersion == newNamespace.ResourceVersion || @@ -401,8 +402,8 @@ func (c *Controller) onNQOSPodAdd(obj interface{}) { // onNQOSPodUpdate queues the pod for processing. func (c *Controller) onNQOSPodUpdate(oldObj, newObj interface{}) { - oldPod := oldObj.(*v1.Pod) - newPod := newObj.(*v1.Pod) + oldPod := oldObj.(*corev1.Pod) + newPod := newObj.(*corev1.Pod) // don't process resync or objects that are marked for deletion if oldPod.ResourceVersion == newPod.ResourceVersion || @@ -447,8 +448,8 @@ func (c *Controller) onNQOSPodDelete(obj interface{}) { // onNQOSNodeUpdate queues the node for processing. func (c *Controller) onNQOSNodeUpdate(oldObj, newObj interface{}) { - oldNode := oldObj.(*v1.Node) - newNode := newObj.(*v1.Node) + oldNode := oldObj.(*corev1.Node) + newNode := newObj.(*corev1.Node) // don't process resync or objects that are marked for deletion if oldNode.ResourceVersion == newNode.ResourceVersion || diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go index af96e8b77a..46b629641d 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go @@ -5,7 +5,7 @@ import ( "sync" "time" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/klog/v2" @@ -108,7 +108,7 @@ func (c *Controller) clearNamespaceForNQOS(namespace string, nqosState *networkQ // setNamespaceForNQOS will handle the logic for figuring out if the provided namespace name // has pods that need to populate or removed from the address sets of the network qoses. 
-func (c *Controller) setNamespaceForNQOS(namespace *v1.Namespace, nqosState *networkQoSState) error { +func (c *Controller) setNamespaceForNQOS(namespace *corev1.Namespace, nqosState *networkQoSState) error { for _, rule := range nqosState.EgressRules { if rule.Classifier == nil { continue diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go index 9e22a2831c..fed2f8dbbf 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go @@ -5,14 +5,15 @@ import ( "sync" "time" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) func (c *Controller) processNextNQOSNodeWorkItem(wg *sync.WaitGroup) bool { @@ -102,12 +103,12 @@ func (c *Controller) syncNetworkQoSNode(key string) error { return nil } -func (c *Controller) getPodsByNode(nodeName string) ([]*v1.Pod, error) { +func (c *Controller) getPodsByNode(nodeName string) ([]*corev1.Pod, error) { pods, err := c.nqosPodLister.List(labels.Everything()) if err != nil { return nil, fmt.Errorf("failed to list pods: %w", err) } - podsByNode := []*v1.Pod{} + podsByNode := []*corev1.Pod{} for _, pod := range pods { if util.PodScheduled(pod) && !util.PodWantsHostNetwork(pod) && pod.Spec.NodeName == nodeName { podsByNode = append(podsByNode, pod) @@ -117,7 +118,7 @@ func (c *Controller) getPodsByNode(nodeName string) ([]*v1.Pod, error) { } // isNodeInLocalZone returns whether the provided node is in a zone local to the zone controller -func (c *Controller) isNodeInLocalZone(node *v1.Node) bool { +func (c *Controller) isNodeInLocalZone(node *corev1.Node) bool { return util.GetNodeZone(node) == c.zone } diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go index df35a992ec..03bf519ebd 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go @@ -7,7 +7,7 @@ import ( "strconv" libovsdbclient "github.com/ovn-org/libovsdb/client" - libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" @@ -59,7 +59,7 @@ func (c *Controller) addQoSToLogicalSwitch(qosState *networkQoSState, switchName } qoses = append(qoses, qos) } - ops := []libovsdb.Operation{} + ops := []ovsdb.Operation{} ops, err = libovsdbops.CreateOrUpdateQoSesOps(c.nbClient, ops, qoses...) 
if err != nil { return fmt.Errorf("failed to create QoS operations for %s/%s: %w", qosState.namespace, qosState.name, err) @@ -94,7 +94,7 @@ func (c *Controller) removeQoSFromLogicalSwitches(qosState *networkQoSState, swi if err != nil { return fmt.Errorf("failed to look up QoSes for %s/%s: %v", qosState.namespace, qosState.name, err) } - unbindQoSOps := []libovsdb.Operation{} + unbindQoSOps := []ovsdb.Operation{} // remove qos rules from logical switches for _, lsName := range switchNames { ops, err := libovsdbops.RemoveQoSesFromLogicalSwitchOps(c.nbClient, nil, lsName, qoses...) @@ -159,7 +159,7 @@ func (c *Controller) cleanupStaleOvnObjects(qosState *networkQoSState) error { } // remove stale qos rules from logical switches for lsName, qoses := range staleSwitchQoSMap { - var switchOps []libovsdb.Operation + var switchOps []ovsdb.Operation switchOps, err = libovsdbops.RemoveQoSesFromLogicalSwitchOps(c.nbClient, switchOps, lsName, qoses...) if err != nil { return fmt.Errorf("failed to get ops to remove stale QoSes from switches %s for NetworkQoS %s/%s: %w", lsName, qosState.namespace, qosState.name, err) @@ -216,7 +216,7 @@ func (c *Controller) deleteOvnQoSes(qoses []*nbdb.QoS) error { switchQoSMap[ls.Name] = qosList } } - unbindQoSOps := []libovsdb.Operation{} + unbindQoSOps := []ovsdb.Operation{} // remove qos rules from logical switches for lsName, qoses := range switchQoSMap { ops, err := libovsdbops.RemoveQoSesFromLogicalSwitchOps(c.nbClient, nil, lsName, qoses...) @@ -259,7 +259,7 @@ func (c *Controller) deleteAddressSet(qosName string) error { // 1. find address sets owned by NetworkQoS // 2. get address sets in use // 3. compare and identify those not in use -func (c *Controller) findStaleAddressSets(qosState *networkQoSState) ([]libovsdb.Operation, error) { +func (c *Controller) findStaleAddressSets(qosState *networkQoSState) ([]ovsdb.Operation, error) { staleAddressSets := []*nbdb.AddressSet{} addrsets, err := libovsdbops.FindAddressSetsWithPredicate(c.nbClient, func(item *nbdb.AddressSet) bool { return item.ExternalIDs[libovsdbops.OwnerControllerKey.String()] == c.controllerName && diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go index 0c901abd99..440f25f4b4 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go @@ -6,7 +6,7 @@ import ( "sync" "time" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" @@ -142,7 +142,7 @@ func (c *Controller) clearPodForNQOS(namespace, name string, nqosState *networkQ // setPodForNQOS will check if the pod meets source selector or dest selector // - match source: add the ip to source address set, bind qos rule to the switch // - match dest: add the ip to the destination address set -func (c *Controller) setPodForNQOS(pod *v1.Pod, nqosState *networkQoSState, namespace *v1.Namespace) error { +func (c *Controller) setPodForNQOS(pod *corev1.Pod, nqosState *networkQoSState, namespace *corev1.Namespace) error { addresses, err := getPodAddresses(pod, c.NetInfo) if err == nil && len(addresses) == 0 { // pod either is not attached to this network, or hasn't been annotated with addresses yet, return without retry @@ -174,7 +174,7 @@ func (c *Controller) setPodForNQOS(pod *v1.Pod, nqosState *networkQoSState, name return 
reconcilePodForDestinations(nqosState, namespace, pod, addresses) } -func reconcilePodForDestinations(nqosState *networkQoSState, podNs *v1.Namespace, pod *v1.Pod, addresses []string) error { +func reconcilePodForDestinations(nqosState *networkQoSState, podNs *corev1.Namespace, pod *corev1.Pod, addresses []string) error { fullPodName := joinMetaNamespaceAndName(pod.Namespace, pod.Name) for _, rule := range nqosState.EgressRules { for index, dest := range rule.Classifier.Destinations { diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go index 85bbd65b93..bda66a3c0f 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go @@ -9,9 +9,6 @@ import ( "testing" "time" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,6 +29,9 @@ import ( libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" ) func init() { @@ -559,7 +559,8 @@ var _ = Describe("NetworkQoS Controller", func() { libovsdbops.ObjectNameKey: nqosNamespace, }) addrset, err := defaultAddrsetFactory.EnsureAddressSet(dbIDs) - addrset.AddAddresses([]string{"10.194.188.4"}) + Expect(err).NotTo(HaveOccurred()) + err = addrset.AddAddresses([]string{"10.194.188.4"}) Expect(err).NotTo(HaveOccurred()) nqosWithoutSrcSelector := &nqostype.NetworkQoS{ ObjectMeta: metav1.ObjectMeta{ @@ -918,7 +919,7 @@ func createTestNBGlobal(nbClient libovsdbclient.Client, zone string) error { } func deleteTestNBGlobal(nbClient libovsdbclient.Client) error { - p := func(nbGlobal *nbdb.NBGlobal) bool { + p := func(_ *nbdb.NBGlobal) bool { return true } ops, err := nbClient.WhereCache(p).Delete() diff --git a/go-controller/pkg/ovn/controller/network_qos/types.go b/go-controller/pkg/ovn/controller/network_qos/types.go index 8ea1ddd8d1..2d9dd8a36b 100644 --- a/go-controller/pkg/ovn/controller/network_qos/types.go +++ b/go-controller/pkg/ovn/controller/network_qos/types.go @@ -8,9 +8,9 @@ import ( "sync" "time" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" @@ -79,7 +79,7 @@ func (nqosState *networkQoSState) initAddressSets(addressSetFactory addressset.A return nil } -func (nqosState *networkQoSState) matchSourceSelector(pod *v1.Pod) bool { +func (nqosState *networkQoSState) matchSourceSelector(pod *corev1.Pod) bool { if pod.Namespace != nqosState.namespace { return false } @@ -89,7 +89,7 @@ func (nqosState *networkQoSState) matchSourceSelector(pod *v1.Pod) bool { return nqosState.PodSelector.Matches(labels.Set(pod.Labels)) } -func (nqosState *networkQoSState) configureSourcePod(ctrl *Controller, pod *v1.Pod, addresses []string) error { +func (nqosState *networkQoSState) configureSourcePod(ctrl *Controller, pod *corev1.Pod, addresses []string) error { fullPodName := joinMetaNamespaceAndName(pod.Namespace, pod.Name) if nqosState.PodSelector != nil { // if PodSelector is nil, 
use namespace's address set, so unnecessary to add ip here
@@ -303,14 +303,14 @@ type Destination struct {
 	NamespaceSelector labels.Selector
 }
 
-func (dest *Destination) matchNamespace(podNs *v1.Namespace, qosNamespace string) bool {
+func (dest *Destination) matchNamespace(podNs *corev1.Namespace, qosNamespace string) bool {
 	if dest.NamespaceSelector == nil {
 		return podNs.Name == qosNamespace
 	}
 	return dest.NamespaceSelector.Matches(labels.Set(podNs.Labels))
 }
 
-func (dest *Destination) matchPod(podNs *v1.Namespace, pod *v1.Pod, qosNamespace string) bool {
+func (dest *Destination) matchPod(podNs *corev1.Namespace, pod *corev1.Pod, qosNamespace string) bool {
 	switch {
 	case dest.NamespaceSelector != nil && dest.PodSelector != nil:
 		return dest.NamespaceSelector.Matches(labels.Set(podNs.Labels)) && dest.PodSelector.Matches(labels.Set(pod.Labels))
@@ -349,7 +349,7 @@ func (dest *Destination) removePod(fullPodName string, addresses []string) error
 func (dest *Destination) removePodsInNamespace(namespace string) error {
 	var err error
 	// check for pods in the namespace being cleared
-	dest.Pods.Range(func(key, value any) bool {
+	dest.Pods.Range(func(key, _ any) bool {
 		fullPodName := key.(string)
 		nameParts := strings.Split(fullPodName, "/")
 		if nameParts[0] != namespace {
@@ -369,7 +369,7 @@ func (dest *Destination) addPodsInNamespace(ctrl *Controller, namespace string)
 	}
 	pods, err := ctrl.nqosPodLister.Pods(namespace).List(podSelector)
 	if err != nil {
-		if errors.IsNotFound(err) || len(pods) == 0 {
+		if apierrors.IsNotFound(err) || len(pods) == 0 {
 			return nil
 		}
 		return fmt.Errorf("failed to look up pods in ns %s: %v", namespace, err)

From 960f5bfaa45ce01cabd4a65f1a5e74aa02578244 Mon Sep 17 00:00:00 2001
From: Xiaobin Qu
Date: Thu, 20 Mar 2025 10:10:50 -0700
Subject: [PATCH 13/18] NetworkQoS: support multiple ports in one classifier

Change `port` in the classifier to `ports`, which contains a list of
protocol&port combinations.
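For illustration, a classifier built from the updated v1alpha1 types can now
carry several protocol/port pairs at once. The snippet below is a minimal
sketch (values arbitrary, mirroring the fixtures in network_qos_test.go),
together with the rough shape of the OVN match it produces:

	// Sketch only: a classifier matching TCP 8080/8081 and UDP 9090.
	port8080, port8081, port9090 := int32(8080), int32(8081), int32(9090)
	classifier := nqostype.Classifier{
		Ports: []*nqostype.Port{
			{Protocol: "TCP", Port: &port8080},
			{Protocol: "TCP", Port: &port8081},
			{Protocol: "UDP", Port: &port9090},
		},
	}
	// Ports are grouped per protocol and rendered as OVN value sets, roughly:
	// ((tcp && tcp.dst == {8080,8081}) || (udp && udp.dst == {9090}))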
Signed-off-by: Xiaobin Qu Signed-off-by: Flavio Fernandes (cherry picked from commit 7ec024906380795cf366d78d29cadf5dd11ef5df) --- .../ovn/controller/network_qos/network_qos.go | 11 +-- .../network_qos/network_qos_test.go | 43 ++++++++--- .../pkg/ovn/controller/network_qos/types.go | 72 ++++++++++--------- test/e2e/networkqos.go | 25 ++++--- 4 files changed, 85 insertions(+), 66 deletions(-) diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos.go b/go-controller/pkg/ovn/controller/network_qos/network_qos.go index 6a6c642e8d..46967f3746 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos.go @@ -162,16 +162,7 @@ func (c *Controller) ensureNetworkQos(nqos *networkqosapi.NetworkQoS) error { ruleState.Classifier = &Classifier{ Destinations: destStates, } - if ruleSpec.Classifier.Port.Protocol != "" { - ruleState.Classifier.Protocol = protocol(ruleSpec.Classifier.Port.Protocol) - if !ruleState.Classifier.Protocol.IsValid() { - return fmt.Errorf("invalid protocol: %s, valid values are: TCP, UDP, SCTP", ruleSpec.Classifier.Port.Protocol) - } - } - if ruleSpec.Classifier.Port.Port > 0 { - port := int(ruleSpec.Classifier.Port.Port) - ruleState.Classifier.Port = &port - } + ruleState.Classifier.Ports = ruleSpec.Classifier.Ports rules = append(rules, ruleState) } desiredNQOSState.EgressRules = rules diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go index bda66a3c0f..83162e13a8 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go @@ -61,6 +61,9 @@ var ( app1Namespace = "app1-ns" app3Namespace = "app3-ns" + port8080 = int32(8080) + port8081 = int32(8081) + port9090 = int32(9090) ) func TestNetworkQoS(t *testing.T) { @@ -138,9 +141,15 @@ func tableEntrySetup(enableInterconnect bool) { }, }, }, - Port: nqostype.Port{ - Protocol: "tcp", - Port: 8080, + Ports: []*nqostype.Port{ + { + Protocol: "tcp", + Port: &port8080, + }, + { + Protocol: "tcp", + Port: &port8081, + }, }, }, }, @@ -170,9 +179,23 @@ func tableEntrySetup(enableInterconnect bool) { }, }, }, - Port: nqostype.Port{ - Protocol: "udp", - Port: 9090, + Ports: []*nqostype.Port{ + { + Protocol: "tcp", + Port: &port8080, + }, + { + Protocol: "tcp", + Port: &port8081, + }, + { + Protocol: "udp", + Port: &port9090, + }, + { + Protocol: "udp", + Port: &port8080, + }, }, }, }, @@ -285,14 +308,14 @@ var _ = Describe("NetworkQoS Controller", func() { Expect(err1).NotTo(HaveOccurred()) srcHashName4, _ := sourceAddrSet.GetASHashNames() dst1HashName4, _ := dst1AddrSet.GetASHashNames() - Expect(qos0.Match).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || (ip4.dst == 128.116.0.0/17 && ip4.dst != {128.116.0.0,128.116.0.255})) && tcp && tcp.dst == 8080", srcHashName4, dst1HashName4))) + Expect(qos0.Match).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || (ip4.dst == 128.116.0.0/17 && ip4.dst != {128.116.0.0,128.116.0.255})) && tcp && tcp.dst == {8080,8081}", srcHashName4, dst1HashName4))) Expect(qos0.Action).To(ContainElement(50)) Expect(qos0.Priority).To(Equal(11000)) Expect(qos0.Bandwidth).To(ContainElements(10000, 100000)) dst3AddrSet, err3 := findAddressSet(defaultAddrsetFactory, nqosNamespace, nqosName, "1", "0", defaultControllerName) Expect(err3).NotTo(HaveOccurred()) dst3HashName4, _ := dst3AddrSet.GetASHashNames() - 
Expect(qos1.Match).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || (ip4.dst == 128.118.0.0/17 && ip4.dst != {128.118.0.0,128.118.0.255})) && udp && udp.dst == 9090", srcHashName4, dst3HashName4))) + Expect(qos1.Match).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || (ip4.dst == 128.118.0.0/17 && ip4.dst != {128.118.0.0,128.118.0.255})) && ((tcp && tcp.dst == {8080,8081}) || (udp && udp.dst == {9090,8080}))", srcHashName4, dst3HashName4))) } app1Pod := &corev1.Pod{ @@ -338,7 +361,7 @@ var _ = Describe("NetworkQoS Controller", func() { return err.Error() } return qos.Match - }).WithTimeout(5 * time.Second).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || (ip4.dst == 128.116.0.0/17 && ip4.dst != {128.116.0.0,128.116.0.255})) && tcp && tcp.dst == 8080", srcHashName4, dst1HashName4))) + }).WithTimeout(5 * time.Second).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || (ip4.dst == 128.116.0.0/17 && ip4.dst != {128.116.0.0,128.116.0.255})) && tcp && tcp.dst == {8080,8081}", srcHashName4, dst1HashName4))) dst3AddrSet, err3 := findAddressSet(defaultAddrsetFactory, nqosNamespace, nqosName, "1", "0", defaultControllerName) Expect(err3).NotTo(HaveOccurred()) @@ -349,7 +372,7 @@ var _ = Describe("NetworkQoS Controller", func() { return err.Error() } return qos.Match - }).WithTimeout(5 * time.Second).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || ip4.dst == 128.118.0.0/17) && udp && udp.dst == 9090", srcHashName4, dst3HashName4))) + }).WithTimeout(5 * time.Second).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || ip4.dst == 128.118.0.0/17) && ((tcp && tcp.dst == {8080,8081}) || (udp && udp.dst == {9090,8080}))", srcHashName4, dst3HashName4))) } By("removes IP from destination address set if pod's labels don't match the selector") diff --git a/go-controller/pkg/ovn/controller/network_qos/types.go b/go-controller/pkg/ovn/controller/network_qos/types.go index 2d9dd8a36b..b390c038a8 100644 --- a/go-controller/pkg/ovn/controller/network_qos/types.go +++ b/go-controller/pkg/ovn/controller/network_qos/types.go @@ -3,6 +3,7 @@ package networkqos import ( "fmt" "slices" + "sort" "strconv" "strings" "sync" @@ -15,6 +16,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" + networkqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -199,21 +201,6 @@ type GressRule struct { Burst *int } -type protocol string - -func (p protocol) IsValid() bool { - switch p.String() { - case "tcp", "udp", "sctp": - return true - default: - return false - } -} - -func (p protocol) String() string { - return strings.ToLower(string(p)) -} - type trafficDirection string const ( @@ -223,10 +210,7 @@ const ( type Classifier struct { Destinations []*Destination - - // port - Protocol protocol - Port *int + Ports []*networkqosv1alpha1.Port } // ToQosMatchString generates dest and protocol/port part of QoS match string, based on @@ -275,21 +259,43 @@ func (c *Classifier) ToQosMatchString(ipv4Enabled, ipv6Enabled bool) string { if strings.Contains(output, "||") { output = fmt.Sprintf("(%s)", output) } - if c.Protocol != "" { - if c.Port != nil && *c.Port > 0 { - match := fmt.Sprintf("%s && %s.dst == %d", c.Protocol.String(), 
c.Protocol.String(), *c.Port) - if output != "" { - output = fmt.Sprintf("%s && %s", output, match) - } else { - output = match - } - } else { - if output != "" { - output = fmt.Sprintf("%s && %s", output, c.Protocol.String()) - } else { - output = c.Protocol.String() - } + protoPortMap := map[string][]string{} + for _, port := range c.Ports { + if port.Protocol == "" { + continue + } + protocol := strings.ToLower(port.Protocol) + ports := protoPortMap[protocol] + if ports == nil { + ports = []string{} + } + if port.Port != nil { + ports = append(ports, fmt.Sprintf("%d", *port.Port)) } + protoPortMap[protocol] = ports + } + + sortedProtocols := make([]string, 0, len(protoPortMap)) + for protocol := range protoPortMap { + sortedProtocols = append(sortedProtocols, protocol) + } + sort.Strings(sortedProtocols) + + portMatches := []string{} + for _, protocol := range sortedProtocols { + ports := protoPortMap[protocol] + match := protocol + if len(ports) == 1 { + match = fmt.Sprintf("%s && %s.dst == %s", protocol, protocol, ports[0]) + } else if len(ports) > 1 { + match = fmt.Sprintf("%s && %s.dst == {%s}", protocol, protocol, strings.Join(ports, ",")) + } + portMatches = append(portMatches, match) + } + if len(portMatches) == 1 { + output = fmt.Sprintf("%s && %s", output, portMatches[0]) + } else if len(portMatches) > 1 { + output = fmt.Sprintf("%s && ((%s))", output, strings.Join(portMatches, ") || (")) } return output } diff --git a/test/e2e/networkqos.go b/test/e2e/networkqos.go index 9bacc84188..5dd65229c6 100644 --- a/test/e2e/networkqos.go +++ b/test/e2e/networkqos.go @@ -201,8 +201,8 @@ spec: app: nqos-test - dscp: %d classifier: - port: - protocol: TCP + ports: + - protocol: TCP to: - podSelector: matchLabels: @@ -212,8 +212,8 @@ spec: app: nqos-test - dscp: %d classifier: - port: - protocol: TCP + ports: + - protocol: TCP port: 80 to: - podSelector: @@ -329,8 +329,8 @@ spec: cidr: %s - dscp: %d classifier: - port: - protocol: TCP + ports: + - protocol: TCP to: - ipBlock: cidr: %s @@ -338,8 +338,8 @@ spec: cidr: %s - dscp: %d classifier: - port: - protocol: TCP + ports: + - protocol: TCP port: 80 to: - ipBlock: @@ -479,9 +479,8 @@ spec: bandwidth: rate: %d classifier: - port: - protocol: TCP - + ports: + - protocol: TCP to: - podSelector: matchLabels: @@ -544,8 +543,8 @@ spec: bandwidth: rate: %d classifier: - port: - protocol: TCP + ports: + - protocol: TCP port: 80 to: - podSelector: From cb5b8066b4374cd7df839ab8f6ceb92fdcc0b31d Mon Sep 17 00:00:00 2001 From: Flavio Fernandes Date: Fri, 11 Apr 2025 14:48:02 +0000 Subject: [PATCH 14/18] update crd from update-codegen.sh cd go-controller && ./hack/update-codegen.sh Signed-off-by: Flavio Fernandes (cherry picked from commit ef217b1ba162d1c28cf38939dc0e6e86d6d70462) --- .../k8s.ovn.org_networkqoses.yaml.j2 | 448 +++++++++++++++--- .../networkqos/v1alpha1/classifier.go | 23 +- .../networkqos/v1alpha1/spec.go | 26 +- .../v1alpha1/zz_generated.deepcopy.go | 29 +- 4 files changed, 441 insertions(+), 85 deletions(-) diff --git a/dist/templates/k8s.ovn.org_networkqoses.yaml.j2 b/dist/templates/k8s.ovn.org_networkqoses.yaml.j2 index 716e4f89b5..f205c8028f 100644 --- a/dist/templates/k8s.ovn.org_networkqoses.yaml.j2 +++ b/dist/templates/k8s.ovn.org_networkqoses.yaml.j2 @@ -87,23 +87,25 @@ spec: This field is optional, and in case it is not set the rule is applied to all egress traffic regardless of the destination. 
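          # The classifier's `ports` list accepts multiple protocol/port
          # entries (values below are illustrative only), e.g.:
          #   ports:
          #   - protocol: TCP
          #     port: 8080
          #   - protocol: UDP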
properties: - port: - description: |- - Port specifies destination protocol and port on which NetworkQoS - rule is applied - properties: - port: - description: port that the traffic must match - format: int32 - maximum: 65535 - minimum: 1 - type: integer - protocol: - description: protocol (tcp, udp, sctp) that the traffic - must match. - pattern: ^TCP|UDP|SCTP$ - type: string - type: object + ports: + items: + description: |- + Port specifies destination protocol and port on which NetworkQoS + rule is applied + properties: + port: + description: port that the traffic must match + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + description: protocol (tcp, udp, sctp) that the traffic + must match. + pattern: ^TCP|UDP|SCTP$ + type: string + type: object + type: array to: items: description: |- @@ -254,60 +256,390 @@ spec: required: - dscp type: object + maxItems: 20 type: array - netAttachRefs: + networkSelectors: description: |- - netAttachRefs points to a list of objects which could be either NAD, UDN, or Cluster UDN. - In the case of NAD, the network type could be of type Layer-3, Layer-2, or Localnet. - If not specified, then the primary network of the selected Pods will be chosen. + networkSelector selects the networks on which the pod IPs need to be added to the source address set. + NetworkQoS controller currently supports `NetworkAttachmentDefinitions` type only. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: NetworkSelector selects a set of networks. properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: + clusterUserDefinedNetworkSelector: description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: + clusterUserDefinedNetworkSelector selects ClusterUserDefinedNetworks when + NetworkSelectionType is 'ClusterUserDefinedNetworks'. + properties: + networkSelector: + description: |- + networkSelector selects ClusterUserDefinedNetworks by label. A null + selector will mot match anything, while an empty ({}) selector will match + all. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - networkSelector + type: object + networkAttachmentDefinitionSelector: description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + networkAttachmentDefinitionSelector selects networks defined in the + selected NetworkAttachmentDefinitions when NetworkSelectionType is + 'SecondaryUserDefinedNetworks'. + properties: + namespaceSelector: + description: |- + namespaceSelector selects namespaces where the + NetworkAttachmentDefinitions are defined. This field follows standard + label selector semantics. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + networkSelector: + description: |- + networkSelector selects NetworkAttachmentDefinitions within the selected + namespaces by label. This field follows standard label selector + semantics. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - namespaceSelector + - networkSelector + type: object + networkSelectionType: + description: networkSelectionType determines the type of networks + selected. + enum: + - DefaultNetwork + - ClusterUserDefinedNetworks + - PrimaryUserDefinedNetworks + - SecondaryUserDefinedNetworks + - NetworkAttachmentDefinitions type: string - resourceVersion: + primaryUserDefinedNetworkSelector: description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: + primaryUserDefinedNetworkSelector selects primary UserDefinedNetworks when + NetworkSelectionType is 'PrimaryUserDefinedNetworks'. + properties: + namespaceSelector: + description: |- + namespaceSelector select the primary UserDefinedNetworks that are servind + the selected namespaces. This field follows standard label selector + semantics. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - namespaceSelector + type: object + secondaryUserDefinedNetworkSelector: description: |- - UID of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string + secondaryUserDefinedNetworkSelector selects secondary UserDefinedNetworks + when NetworkSelectionType is 'SecondaryUserDefinedNetworks'. + properties: + namespaceSelector: + description: |- + namespaceSelector selects namespaces where the secondary + UserDefinedNetworks are defined. This field follows standard label + selector semantics. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + networkSelector: + description: |- + networkSelector selects secondary UserDefinedNetworks within the selected + namespaces by label. This field follows standard label selector + semantics. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + required: + - namespaceSelector + - networkSelector + type: object + required: + - networkSelectionType type: object - x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: 'Inconsistent selector: both networkSelectionType ClusterUserDefinedNetworks + and clusterUserDefinedNetworkSelector have to be set or neither' + rule: '!has(self.networkSelectionType) ? true : has(self.clusterUserDefinedNetworkSelector) + ? self.networkSelectionType == ''ClusterUserDefinedNetworks'' + : self.networkSelectionType != ''ClusterUserDefinedNetworks''' + - message: 'Inconsistent selector: both networkSelectionType PrimaryUserDefinedNetworks + and primaryUserDefinedNetworkSelector have to be set or neither' + rule: '!has(self.networkSelectionType) ? true : has(self.primaryUserDefinedNetworkSelector) + ? self.networkSelectionType == ''PrimaryUserDefinedNetworks'' + : self.networkSelectionType != ''PrimaryUserDefinedNetworks''' + - message: 'Inconsistent selector: both networkSelectionType SecondaryUserDefinedNetworks + and secondaryUserDefinedNetworkSelector have to be set or neither' + rule: '!has(self.networkSelectionType) ? true : has(self.secondaryUserDefinedNetworkSelector) + ? self.networkSelectionType == ''SecondaryUserDefinedNetworks'' + : self.networkSelectionType != ''SecondaryUserDefinedNetworks''' + - message: 'Inconsistent selector: both networkSelectionType NetworkAttachmentDefinitions + and networkAttachmentDefinitionSelector have to be set or neither' + rule: '!has(self.networkSelectionType) ? true : has(self.networkAttachmentDefinitionSelector) + ? self.networkSelectionType == ''NetworkAttachmentDefinitions'' + : self.networkSelectionType != ''NetworkAttachmentDefinitions''' + maxItems: 5 + minItems: 1 type: array + x-kubernetes-list-map-keys: + - networkSelectionType + x-kubernetes-list-type: map x-kubernetes-validations: - - message: netAttachRefs is immutable + - message: networkSelector is immutable rule: self == oldSelf + - message: Unsupported network selection type + rule: self.all(sel, sel.networkSelectionType == 'ClusterUserDefinedNetworks' + || sel.networkSelectionType == 'NetworkAttachmentDefinitions') podSelector: description: |- podSelector applies the NetworkQoS rule only to the pods in the namespace whose label diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/classifier.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/classifier.go index 901ece9260..01c1546427 100644 --- a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/classifier.go +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/classifier.go @@ -17,11 +17,15 @@ limitations under the License. package v1alpha1 +import ( + networkqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" +) + // ClassifierApplyConfiguration represents a declarative configuration of the Classifier type for use // with apply. 
type ClassifierApplyConfiguration struct { - To []DestinationApplyConfiguration `json:"to,omitempty"` - Port *PortApplyConfiguration `json:"port,omitempty"` + To []DestinationApplyConfiguration `json:"to,omitempty"` + Ports []*networkqosv1alpha1.Port `json:"ports,omitempty"` } // ClassifierApplyConfiguration constructs a declarative configuration of the Classifier type for use with @@ -43,10 +47,15 @@ func (b *ClassifierApplyConfiguration) WithTo(values ...*DestinationApplyConfigu return b } -// WithPort sets the Port field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Port field is set to the value of the last call. -func (b *ClassifierApplyConfiguration) WithPort(value *PortApplyConfiguration) *ClassifierApplyConfiguration { - b.Port = value +// WithPorts adds the given value to the Ports field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ports field. +func (b *ClassifierApplyConfiguration) WithPorts(values ...**networkqosv1alpha1.Port) *ClassifierApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPorts") + } + b.Ports = append(b.Ports, *values[i]) + } return b } diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/spec.go b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/spec.go index 520e2a20e4..848cbe073d 100644 --- a/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/spec.go +++ b/go-controller/pkg/crd/networkqos/v1alpha1/apis/applyconfiguration/networkqos/v1alpha1/spec.go @@ -18,17 +18,17 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/client-go/applyconfigurations/meta/v1" + types "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // SpecApplyConfiguration represents a declarative configuration of the Spec type for use // with apply. type SpecApplyConfiguration struct { - NetworkAttachmentRefs []v1.ObjectReference `json:"netAttachRefs,omitempty"` - PodSelector *metav1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` - Priority *int `json:"priority,omitempty"` - Egress []RuleApplyConfiguration `json:"egress,omitempty"` + NetworkSelectors *types.NetworkSelectors `json:"networkSelectors,omitempty"` + PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` + Priority *int `json:"priority,omitempty"` + Egress []RuleApplyConfiguration `json:"egress,omitempty"` } // SpecApplyConfiguration constructs a declarative configuration of the Spec type for use with @@ -37,20 +37,18 @@ func Spec() *SpecApplyConfiguration { return &SpecApplyConfiguration{} } -// WithNetworkAttachmentRefs adds the given value to the NetworkAttachmentRefs field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the NetworkAttachmentRefs field. 
-func (b *SpecApplyConfiguration) WithNetworkAttachmentRefs(values ...v1.ObjectReference) *SpecApplyConfiguration { - for i := range values { - b.NetworkAttachmentRefs = append(b.NetworkAttachmentRefs, values[i]) - } +// WithNetworkSelectors sets the NetworkSelectors field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetworkSelectors field is set to the value of the last call. +func (b *SpecApplyConfiguration) WithNetworkSelectors(value types.NetworkSelectors) *SpecApplyConfiguration { + b.NetworkSelectors = &value return b } // WithPodSelector sets the PodSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodSelector field is set to the value of the last call. -func (b *SpecApplyConfiguration) WithPodSelector(value *metav1.LabelSelectorApplyConfiguration) *SpecApplyConfiguration { +func (b *SpecApplyConfiguration) WithPodSelector(value *v1.LabelSelectorApplyConfiguration) *SpecApplyConfiguration { b.PodSelector = value return b } diff --git a/go-controller/pkg/crd/networkqos/v1alpha1/zz_generated.deepcopy.go b/go-controller/pkg/crd/networkqos/v1alpha1/zz_generated.deepcopy.go index 407bc5a4e6..720119ff8a 100644 --- a/go-controller/pkg/crd/networkqos/v1alpha1/zz_generated.deepcopy.go +++ b/go-controller/pkg/crd/networkqos/v1alpha1/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. package v1alpha1 import ( - corev1 "k8s.io/api/core/v1" + types "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" networkingv1 "k8s.io/api/networking/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -53,7 +53,17 @@ func (in *Classifier) DeepCopyInto(out *Classifier) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - out.Port = in.Port + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*Port, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Port) + (*in).DeepCopyInto(*out) + } + } + } return } @@ -162,6 +172,11 @@ func (in *NetworkQoSList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Port) DeepCopyInto(out *Port) { *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } return } @@ -196,10 +211,12 @@ func (in *Rule) DeepCopy() *Rule { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
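// NetworkSelectors below is copied element by element: each selector embeds
// pointer-typed sub-selectors, so a shallow copy would alias them.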
func (in *Spec) DeepCopyInto(out *Spec) { *out = *in - if in.NetworkAttachmentRefs != nil { - in, out := &in.NetworkAttachmentRefs, &out.NetworkAttachmentRefs - *out = make([]corev1.ObjectReference, len(*in)) - copy(*out, *in) + if in.NetworkSelectors != nil { + in, out := &in.NetworkSelectors, &out.NetworkSelectors + *out = make(types.NetworkSelectors, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } in.PodSelector.DeepCopyInto(&out.PodSelector) if in.Egress != nil { From 981e8286f70417696ed3ac2672bb3717403ab24e Mon Sep 17 00:00:00 2001 From: Xiaobin Qu Date: Thu, 24 Apr 2025 11:20:20 -0700 Subject: [PATCH 15/18] refactor networkqos handlers - namespace/pod/node handlers detect possible changes and trigger reconciliation by putting networkqos in event queue. - reconcile networkqos rules in networkqos handler only Signed-off-by: Xiaobin Qu Signed-off-by: Flavio Fernandes (cherry picked from commit 7f34252495c9de9e84587195a02a5172fb2e1f0d) --- .../ovn/controller/network_qos/network_qos.go | 152 +++++----- .../network_qos/network_qos_controller.go | 244 +++++++++++------ .../network_qos/network_qos_namespace.go | 225 +++++++++------ .../network_qos/network_qos_node.go | 115 +------- .../network_qos/network_qos_ovnnb.go | 2 +- .../controller/network_qos/network_qos_pod.go | 259 ++++++++++-------- .../network_qos/network_qos_test.go | 61 +++-- .../pkg/ovn/controller/network_qos/repair.go | 6 +- .../pkg/ovn/controller/network_qos/types.go | 90 +++--- 9 files changed, 633 insertions(+), 521 deletions(-) diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos.go b/go-controller/pkg/ovn/controller/network_qos/network_qos.go index 46967f3746..86e280ca93 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos.go @@ -14,8 +14,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" metaapplyv1 "k8s.io/client-go/applyconfigurations/meta/v1" - "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" "k8s.io/utils/ptr" @@ -29,50 +29,38 @@ import ( func (c *Controller) processNextNQOSWorkItem(wg *sync.WaitGroup) bool { wg.Add(1) defer wg.Done() - nqosKey, quit := c.nqosQueue.Get() + nqos, quit := c.nqosQueue.Get() if quit { return false } - defer c.nqosQueue.Done(nqosKey) + defer c.nqosQueue.Done(nqos) - err := c.syncNetworkQoS(nqosKey) - if err == nil { - c.nqosQueue.Forget(nqosKey) - return true - } - utilruntime.HandleError(fmt.Errorf("%s: failed to handle key %s, error: %v", c.controllerName, nqosKey, err)) - - if c.nqosQueue.NumRequeues(nqosKey) < maxRetries { - c.nqosQueue.AddRateLimited(nqosKey) - return true + if err := c.syncNetworkQoS(nqos); err != nil { + if c.nqosQueue.NumRequeues(nqos) < maxRetries { + c.nqosQueue.AddRateLimited(nqos) + return true + } + klog.Warningf("%s: Failed to reconcile NetworkQoS %s/%s: %v", c.controllerName, nqos.Namespace, nqos.Name, err) + utilruntime.HandleError(fmt.Errorf("failed to reconcile NetworkQoS %s/%s: %v", nqos.Namespace, nqos.Name, err)) } - - c.nqosQueue.Forget(nqosKey) + c.nqosQueue.Forget(nqos) return true } // syncNetworkQoS decides the main logic everytime // we dequeue a key from the nqosQueue cache -func (c *Controller) syncNetworkQoS(key string) error { +func (c *Controller) syncNetworkQoS(nqos *networkqosapi.NetworkQoS) error { startTime := time.Now() - nqosNamespace, nqosName, err := 
cache.SplitMetaNamespaceKey(key) - if err != nil { - return err - } - klog.V(5).Infof("%s - Processing sync for Network QoS %s", c.controllerName, nqosName) - + key := joinMetaNamespaceAndName(nqos.Namespace, nqos.Name) defer func() { - klog.V(5).Infof("%s - Finished syncing Network QoS %s : %v", c.controllerName, nqosName, time.Since(startTime)) + klog.V(5).Infof("%s - Finished reconciling NetworkQoS %s : %v", c.controllerName, key, time.Since(startTime)) }() - nqos, err := c.nqosLister.NetworkQoSes(nqosNamespace).Get(nqosName) - if err != nil && !apierrors.IsNotFound(err) { - return err - } - if nqos == nil { - klog.V(6).Infof("%s - NetworkQoS %s no longer exists.", c.controllerName, key) + klog.V(5).Infof("%s - reconciling NetworkQoS %s", c.controllerName, key) + if nqos.DeletionTimestamp != nil { + klog.V(6).Infof("%s - NetworkQoS %s is being deleted.", c.controllerName, key) return c.nqosCache.DoWithLock(key, func(_ string) error { - return c.clearNetworkQos(nqosNamespace, nqosName) + return c.clearNetworkQos(nqos.Namespace, nqos.Name) }) } @@ -81,7 +69,7 @@ func (c *Controller) syncNetworkQoS(key string) error { } else if !networkManagedByMe { // maybe NetworkAttachmentName has been changed from this one to other value, try cleanup anyway return c.nqosCache.DoWithLock(key, func(_ string) error { - return c.clearNetworkQos(nqosNamespace, nqosName) + return c.clearNetworkQos(nqos.Namespace, nqos.Name) }) } @@ -90,7 +78,7 @@ func (c *Controller) syncNetworkQoS(key string) error { return c.nqosCache.DoWithLock(key, func(_ string) error { // save key to avoid racing c.nqosCache.Store(key, nil) - if err = c.ensureNetworkQos(nqos); err != nil { + if err := c.ensureNetworkQos(nqos); err != nil { c.nqosCache.Delete(key) // we can ignore the error if status update doesn't succeed; best effort c.updateNQOSStatusToNotReady(nqos.Namespace, nqos.Name, "failed to reconcile", err) @@ -250,6 +238,11 @@ func (c *Controller) updateNQOSStatusToNotReady(namespace, name, reason string, func (c *Controller) updateNQOStatusCondition(newCondition metav1.Condition, namespace, name string) error { nqos, err := c.nqosLister.NetworkQoSes(namespace).Get(name) if err != nil { + if apierrors.IsNotFound(err) { + // Resource was deleted, log it + klog.V(5).Infof("NetworkQoS %s/%s updating status but not found, ignoring", namespace, name) + return nil + } return err } @@ -280,23 +273,31 @@ func (c *Controller) resyncPods(nqosState *networkQoSState) error { return fmt.Errorf("failed to list pods in namespace %s: %w", nqosState.namespace, err) } nsCache := make(map[string]*corev1.Namespace) + addressSetMap := map[string]sets.Set[string]{} for _, pod := range pods { - if pod.Spec.HostNetwork { + if pod.Spec.HostNetwork || pod.DeletionTimestamp != nil { continue } ns := nsCache[pod.Namespace] if ns == nil { ns, err = c.nqosNamespaceLister.Get(pod.Namespace) if err != nil { + if apierrors.IsNotFound(err) { + klog.Warningf("Namespace %s not found, skipping pod %s/%s", pod.Namespace, pod.Namespace, pod.Name) + continue + } return fmt.Errorf("failed to get namespace %s: %w", pod.Namespace, err) } nsCache[pod.Namespace] = ns } - if err := c.setPodForNQOS(pod, nqosState, ns); err != nil { + if ns.DeletionTimestamp != nil { + continue + } + if err := c.setPodForNQOS(pod, nqosState, ns, addressSetMap); err != nil { return err } } - return nil + return nqosState.cleanupStaleAddresses(addressSetMap) } var cudnController = udnv1.SchemeGroupVersion.WithKind("ClusterUserDefinedNetwork") @@ -307,16 +308,16 @@ func (c *Controller) 
networkManagedByMe(networkSelectors crdtypes.NetworkSelecto return c.IsDefault(), nil } var selectedNads []*nadv1.NetworkAttachmentDefinition + var err error for _, networkSelector := range networkSelectors { switch networkSelector.NetworkSelectionType { case crdtypes.DefaultNetwork: return c.IsDefault(), nil case crdtypes.ClusterUserDefinedNetworks: - nadSelector, err := metav1.LabelSelectorAsSelector(&networkSelector.ClusterUserDefinedNetworkSelector.NetworkSelector) - if err != nil { - return false, err + if networkSelector.ClusterUserDefinedNetworkSelector == nil { + return false, fmt.Errorf("empty cluster user defined network selector") } - nads, err := c.nadLister.List(nadSelector) + nads, err := c.getNetAttachDefsBySelectors(nil, &networkSelector.ClusterUserDefinedNetworkSelector.NetworkSelector) if err != nil { return false, err } @@ -333,38 +334,10 @@ func (c *Controller) networkManagedByMe(networkSelectors crdtypes.NetworkSelecto if networkSelector.NetworkAttachmentDefinitionSelector == nil { return false, fmt.Errorf("empty network attachment definition selector") } - nadSelector, err := metav1.LabelSelectorAsSelector(&networkSelector.NetworkAttachmentDefinitionSelector.NetworkSelector) - if err != nil { - return false, err - } - if nadSelector.Empty() { - return false, fmt.Errorf("empty network selector") - } - nsSelector, err := metav1.LabelSelectorAsSelector(&networkSelector.NetworkAttachmentDefinitionSelector.NamespaceSelector) + selectedNads, err = c.getNetAttachDefsBySelectors(&networkSelector.NetworkAttachmentDefinitionSelector.NamespaceSelector, &networkSelector.NetworkAttachmentDefinitionSelector.NetworkSelector) if err != nil { return false, err } - - if nsSelector.Empty() { - // if namespace selector is empty, list NADs in all namespaces - nads, err := c.nadLister.List(nadSelector) - if err != nil { - return false, err - } - selectedNads = append(selectedNads, nads...) - } else { - namespaces, err := c.nqosNamespaceLister.List(nsSelector) - if err != nil { - return false, err - } - for _, ns := range namespaces { - nads, err := c.nadLister.NetworkAttachmentDefinitions(ns.Name).List(nadSelector) - if err != nil { - return false, err - } - selectedNads = append(selectedNads, nads...) 
- } - } default: return false, fmt.Errorf("unsupported network selection type %s", networkSelector.NetworkSelectionType) } @@ -394,3 +367,46 @@ func (c *Controller) getLogicalSwitchName(nodeName string) string { return "" } } + +func (c *Controller) getAllNetworkQoSes() ([]*networkqosapi.NetworkQoS, error) { + nqoses, err := c.nqosLister.List(labels.Everything()) + if err != nil { + return nil, fmt.Errorf("failed to list NetworkQoS: %v", err) + } + return nqoses, nil +} + +func (c *Controller) getNetAttachDefsBySelectors(namespaceSelector, nadSelector *metav1.LabelSelector) ([]*nadv1.NetworkAttachmentDefinition, error) { + if nadSelector == nil || nadSelector.Size() == 0 { + return nil, fmt.Errorf("empty network selector") + } + nadSel, err := metav1.LabelSelectorAsSelector(nadSelector) + if err != nil { + return nil, fmt.Errorf("invalid network selector %v: %v", nadSelector.String(), err) + } + var selectedNads []*nadv1.NetworkAttachmentDefinition + if namespaceSelector != nil && namespaceSelector.Size() > 0 { + nsSelector, err := metav1.LabelSelectorAsSelector(namespaceSelector) + if err != nil { + return nil, fmt.Errorf("invalid namespace selector %v: %v", namespaceSelector.String(), err) + } + namespaces, err := c.nqosNamespaceLister.List(nsSelector) + if err != nil { + return nil, fmt.Errorf("failed to list namespaces: %v", err) + } + for _, ns := range namespaces { + nads, err := c.nadLister.NetworkAttachmentDefinitions(ns.Name).List(nadSel) + if err != nil { + return nil, fmt.Errorf("failed to list NADs in namespace %s: %v", ns.Name, err) + } + selectedNads = append(selectedNads, nads...) + } + } else { + nads, err := c.nadLister.List(nadSel) + if err != nil { + return nil, fmt.Errorf("failed to list NADs: %v", err) + } + selectedNads = append(selectedNads, nads...) 
+ } + return selectedNads, nil +} diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go index e997271412..59ff1969c4 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go @@ -10,6 +10,7 @@ import ( nadlisterv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -66,23 +67,22 @@ type Controller struct { // store's the name of the zone that this controller belongs to zone string - // nqos namespace+name is key -> cloned value of NQOS kapi is value - //nqosCache map[string]*networkQoSState + // namespace+name -> cloned value of NetworkQoS nqosCache *syncmap.SyncMap[*networkQoSState] // queues for the CRDs where incoming work is placed to de-dup - nqosQueue workqueue.TypedRateLimitingInterface[string] + nqosQueue workqueue.TypedRateLimitingInterface[*networkqosapi.NetworkQoS] // cached access to nqos objects nqosLister networkqoslister.NetworkQoSLister nqosCacheSynced cache.InformerSynced // namespace queue, cache, lister nqosNamespaceLister corev1listers.NamespaceLister nqosNamespaceSynced cache.InformerSynced - nqosNamespaceQueue workqueue.TypedRateLimitingInterface[string] + nqosNamespaceQueue workqueue.TypedRateLimitingInterface[*eventData[*corev1.Namespace]] // pod queue, cache, lister nqosPodLister corev1listers.PodLister nqosPodSynced cache.InformerSynced - nqosPodQueue workqueue.TypedRateLimitingInterface[string] + nqosPodQueue workqueue.TypedRateLimitingInterface[*eventData[*corev1.Pod]] // node queue, cache, lister nqosNodeLister corev1listers.NodeLister nqosNodeSynced cache.InformerSynced @@ -93,6 +93,36 @@ type Controller struct { nadSynced cache.InformerSynced } +type eventData[T metav1.Object] struct { + old T + new T +} + +func newEventData[T metav1.Object](old T, new T) *eventData[T] { + return &eventData[T]{ + old: old, + new: new, + } +} + +func (e *eventData[T]) name() string { + if !reflect.ValueOf(e.old).IsNil() { + return e.old.GetName() + } else if !reflect.ValueOf(e.new).IsNil() { + return e.new.GetName() + } + return "" +} + +func (e *eventData[T]) namespace() string { + if !reflect.ValueOf(e.old).IsNil() { + return e.old.GetNamespace() + } else if !reflect.ValueOf(e.new).IsNil() { + return e.new.GetNamespace() + } + return "" +} + // NewController returns a new *Controller. 
func NewController( controllerName string, @@ -125,8 +155,8 @@ func NewController( c.nqosLister = nqosInformer.Lister() c.nqosCacheSynced = nqosInformer.Informer().HasSynced c.nqosQueue = workqueue.NewTypedRateLimitingQueueWithConfig( - workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), - workqueue.TypedRateLimitingQueueConfig[string]{Name: "networkQoS"}, + workqueue.NewTypedItemFastSlowRateLimiter[*networkqosapi.NetworkQoS](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[*networkqosapi.NetworkQoS]{Name: "networkQoS"}, ) _, err := nqosInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onNQOSAdd, @@ -141,8 +171,8 @@ func NewController( c.nqosNamespaceLister = namespaceInformer.Lister() c.nqosNamespaceSynced = namespaceInformer.Informer().HasSynced c.nqosNamespaceQueue = workqueue.NewTypedRateLimitingQueueWithConfig( - workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), - workqueue.TypedRateLimitingQueueConfig[string]{Name: "nqosNamespaces"}, + workqueue.NewTypedItemFastSlowRateLimiter[*eventData[*corev1.Namespace]](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[*eventData[*corev1.Namespace]]{Name: "nqosNamespaces"}, ) _, err = namespaceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onNQOSNamespaceAdd, @@ -157,8 +187,8 @@ func NewController( c.nqosPodLister = podInformer.Lister() c.nqosPodSynced = podInformer.Informer().HasSynced c.nqosPodQueue = workqueue.NewTypedRateLimitingQueueWithConfig( - workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), - workqueue.TypedRateLimitingQueueConfig[string]{Name: "nqosPods"}, + workqueue.NewTypedItemFastSlowRateLimiter[*eventData[*corev1.Pod]](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[*eventData[*corev1.Pod]]{Name: "nqosPods"}, ) _, err = podInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onNQOSPodAdd, @@ -303,20 +333,27 @@ func (c *Controller) runNQOSNodeWorker(wg *sync.WaitGroup) { // handlers // onNQOSAdd queues the NQOS for processing. -func (c *Controller) onNQOSAdd(obj interface{}) { - key, err := cache.MetaNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) +func (c *Controller) onNQOSAdd(obj any) { + nqos, ok := obj.(*networkqosapi.NetworkQoS) + if !ok { + utilruntime.HandleError(fmt.Errorf("expecting NetworkQoS but received %T", obj)) return } - c.nqosQueue.Add(key) + c.nqosQueue.Add(nqos) } // onNQOSUpdate updates the NQOS Selector in the cache and queues the NQOS for processing. 
-func (c *Controller) onNQOSUpdate(oldObj, newObj interface{}) { - oldNQOS := oldObj.(*networkqosapi.NetworkQoS) - newNQOS := newObj.(*networkqosapi.NetworkQoS) - +func (c *Controller) onNQOSUpdate(oldObj, newObj any) { + oldNQOS, ok := oldObj.(*networkqosapi.NetworkQoS) + if !ok { + utilruntime.HandleError(fmt.Errorf("expecting NetworkQoS but received %T", oldObj)) + return + } + newNQOS, ok := newObj.(*networkqosapi.NetworkQoS) + if !ok { + utilruntime.HandleError(fmt.Errorf("expecting NetworkQoS but received %T", newObj)) + return + } // don't process resync or objects that are marked for deletion if oldNQOS.ResourceVersion == newNQOS.ResourceVersion || !newNQOS.GetDeletionTimestamp().IsZero() { @@ -325,43 +362,60 @@ func (c *Controller) onNQOSUpdate(oldObj, newObj interface{}) { if reflect.DeepEqual(oldNQOS.Spec, newNQOS.Spec) { return } - key, err := cache.MetaNamespaceKeyFunc(newObj) - if err == nil { - // updates to NQOS object should be very rare, once put in place they usually stay the same - klog.V(4).Infof("Updating Network QoS %s: nqosSpec %v", - key, newNQOS.Spec) - c.nqosQueue.Add(key) - } + c.nqosQueue.Add(newNQOS) } // onNQOSDelete queues the NQOS for processing. func (c *Controller) onNQOSDelete(obj interface{}) { - key, err := cache.MetaNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) - return + nqos, ok := obj.(*networkqosapi.NetworkQoS) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) + return + } + nqos, ok = tombstone.Obj.(*networkqosapi.NetworkQoS) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a NetworkQoS: %#v", tombstone.Obj)) + return + } + } + if nqos != nil { + c.nqosQueue.Add(nqos) } - c.nqosQueue.Add(key) } // onNQOSNamespaceAdd queues the namespace for processing. func (c *Controller) onNQOSNamespaceAdd(obj interface{}) { - key, err := cache.MetaNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + ns, ok := obj.(*corev1.Namespace) + if !ok { + utilruntime.HandleError(fmt.Errorf("expecting Namespace but received %T", obj)) + return + } + if ns == nil { + utilruntime.HandleError(fmt.Errorf("empty namespace")) return } - c.nqosNamespaceQueue.Add(key) + c.nqosNamespaceQueue.Add(newEventData(nil, ns)) } // onNQOSNamespaceUpdate queues the namespace for processing. func (c *Controller) onNQOSNamespaceUpdate(oldObj, newObj interface{}) { - oldNamespace := oldObj.(*corev1.Namespace) - newNamespace := newObj.(*corev1.Namespace) - - // don't process resync or objects that are marked for deletion - if oldNamespace.ResourceVersion == newNamespace.ResourceVersion || - !newNamespace.GetDeletionTimestamp().IsZero() { + oldNamespace, ok := oldObj.(*corev1.Namespace) + if !ok { + utilruntime.HandleError(fmt.Errorf("expecting Namespace but received %T", oldObj)) + return + } + newNamespace, ok := newObj.(*corev1.Namespace) + if !ok { + utilruntime.HandleError(fmt.Errorf("expecting Namespace but received %T", newObj)) + return + } + if oldNamespace == nil || newNamespace == nil { + utilruntime.HandleError(fmt.Errorf("empty namespace")) + return + } + if oldNamespace.ResourceVersion == newNamespace.ResourceVersion || !newNamespace.GetDeletionTimestamp().IsZero() { return } // If the labels have not changed, then there's no change that we care about: return. 
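	// The namespace queue now carries an eventData pair (old and new object)
	// rather than a string key, so the worker can compare both snapshots and
	// requeue only the NetworkQoS objects affected by the label change.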
@@ -370,41 +424,60 @@ func (c *Controller) onNQOSNamespaceUpdate(oldObj, newObj interface{}) { if labels.Equals(oldNamespaceLabels, newNamespaceLabels) { return } - key, err := cache.MetaNamespaceKeyFunc(newObj) - if err == nil { - klog.V(5).Infof("Updating Namespace in Network QoS controller %s: "+ - "namespaceLabels: %v", key, newNamespaceLabels) - c.nqosNamespaceQueue.Add(key) - } + klog.V(5).Infof("Namespace %s labels have changed: %v", newNamespace.Name, newNamespaceLabels) + c.nqosNamespaceQueue.Add(newEventData(oldNamespace, newNamespace)) } // onNQOSNamespaceDelete queues the namespace for processing. func (c *Controller) onNQOSNamespaceDelete(obj interface{}) { - key, err := cache.MetaNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) - return + ns, ok := obj.(*corev1.Namespace) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) + return + } + ns, ok = tombstone.Obj.(*corev1.Namespace) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Namespace: %#v", tombstone.Obj)) + return + } + } + if ns != nil { + c.nqosNamespaceQueue.Add(newEventData(ns, nil)) } - klog.V(5).Infof("Deleting Namespace in Network QoS %s", key) - c.nqosNamespaceQueue.Add(key) } // onNQOSPodAdd queues the pod for processing. func (c *Controller) onNQOSPodAdd(obj interface{}) { - key, err := cache.MetaNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + pod, ok := obj.(*corev1.Pod) + if !ok { + utilruntime.HandleError(fmt.Errorf("expecting Pod but received %T", obj)) + return + } + if pod == nil { + utilruntime.HandleError(fmt.Errorf("empty pod")) return } - klog.V(5).Infof("Adding Pod in Network QoS controller %s", key) - c.nqosPodQueue.Add(key) + c.nqosPodQueue.Add(newEventData(nil, pod)) } // onNQOSPodUpdate queues the pod for processing. func (c *Controller) onNQOSPodUpdate(oldObj, newObj interface{}) { - oldPod := oldObj.(*corev1.Pod) - newPod := newObj.(*corev1.Pod) - + oldPod, ok := oldObj.(*corev1.Pod) + if !ok { + utilruntime.HandleError(fmt.Errorf("expecting Pod but received %T", oldObj)) + return + } + newPod, ok := newObj.(*corev1.Pod) + if !ok { + utilruntime.HandleError(fmt.Errorf("expecting Pod but received %T", newObj)) + return + } + if oldPod == nil || newPod == nil { + utilruntime.HandleError(fmt.Errorf("empty pod")) + return + } // don't process resync or objects that are marked for deletion if oldPod.ResourceVersion == newPod.ResourceVersion || !newPod.GetDeletionTimestamp().IsZero() { @@ -426,36 +499,51 @@ func (c *Controller) onNQOSPodUpdate(oldObj, newObj interface{}) { oldPodCompleted == newPodCompleted { return } - key, err := cache.MetaNamespaceKeyFunc(newObj) - if err == nil { - klog.V(5).Infof("Updating Pod in Network QoS controller %s: "+ - "podLabels %v, podIPs: %v, PodCompleted?: %v", key, newPodLabels, - newPodIPs, newPodCompleted) - c.nqosPodQueue.Add(key) - } + klog.V(5).Infof("Handling update event for pod %s/%s, labels %v, podIPs: %v, PodCompleted?: %v", newPod.Namespace, newPod.Name, newPodLabels, newPodIPs, newPodCompleted) + c.nqosPodQueue.Add(newEventData(oldPod, newPod)) } // onNQOSPodDelete queues the pod for processing. 
func (c *Controller) onNQOSPodDelete(obj interface{}) { - key, err := cache.MetaNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) - return + pod, ok := obj.(*corev1.Pod) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) + return + } + pod, ok = tombstone.Obj.(*corev1.Pod) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Pod: %#v", tombstone.Obj)) + return + } + } + if pod != nil { + c.nqosPodQueue.Add(newEventData(pod, nil)) } - klog.V(5).Infof("Deleting Pod Network QoS %s", key) - c.nqosPodQueue.Add(key) } // onNQOSNodeUpdate queues the node for processing. func (c *Controller) onNQOSNodeUpdate(oldObj, newObj interface{}) { - oldNode := oldObj.(*corev1.Node) - newNode := newObj.(*corev1.Node) - + oldNode, ok := oldObj.(*corev1.Node) + if !ok { + utilruntime.HandleError(fmt.Errorf("expecting Node but received %T", oldObj)) + return + } + newNode, ok := newObj.(*corev1.Node) + if !ok { + utilruntime.HandleError(fmt.Errorf("expecting Node but received %T", newObj)) + return + } // don't process resync or objects that are marked for deletion if oldNode.ResourceVersion == newNode.ResourceVersion || !newNode.GetDeletionTimestamp().IsZero() { return } + // node not in local zone, no need to process + if !c.isNodeInLocalZone(oldNode) && !c.isNodeInLocalZone(newNode) { + return + } // only care about node's zone name changes if !util.NodeZoneAnnotationChanged(oldNode, newNode) { return diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go index 46b629641d..d4363b8acb 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go @@ -6,130 +6,179 @@ import ( "time" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" + + nqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" + crdtypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" ) func (c *Controller) processNextNQOSNamespaceWorkItem(wg *sync.WaitGroup) bool { wg.Add(1) defer wg.Done() - nqosNSKey, quit := c.nqosNamespaceQueue.Get() - if quit { + eventData, shutdown := c.nqosNamespaceQueue.Get() + if shutdown { return false } - defer c.nqosNamespaceQueue.Done(nqosNSKey) - - err := c.syncNetworkQoSNamespace(nqosNSKey) - if err == nil { - c.nqosNamespaceQueue.Forget(nqosNSKey) - return true - } + defer c.nqosNamespaceQueue.Done(eventData) - utilruntime.HandleError(fmt.Errorf("%v failed with: %v", nqosNSKey, err)) - - if c.nqosNamespaceQueue.NumRequeues(nqosNSKey) < maxRetries { - c.nqosNamespaceQueue.AddRateLimited(nqosNSKey) - return true + if err := c.syncNetworkQoSNamespace(eventData); err != nil { + if c.nqosNamespaceQueue.NumRequeues(eventData) < maxRetries { + klog.Errorf("%s: Failed to reconcile namespace %s: %v", c.controllerName, eventData.name(), err) + c.nqosNamespaceQueue.AddRateLimited(eventData) + return true + } + utilruntime.HandleError(fmt.Errorf("failed to reconcile namespace %s: %v", eventData.name(), err)) } - - 
c.nqosNamespaceQueue.Forget(nqosNSKey) + c.nqosNamespaceQueue.Forget(eventData) return true } -// syncNetworkQoSNamespace decides the main logic everytime -// we dequeue a key from the nqosNamespaceQueue cache -func (c *Controller) syncNetworkQoSNamespace(key string) error { +// syncNetworkQoSNamespace checks if the namespace change affects any NetworkQoS +func (c *Controller) syncNetworkQoSNamespace(eventData *eventData[*corev1.Namespace]) error { startTime := time.Now() - klog.V(5).Infof("Processing sync for Namespace %s in Network QoS controller", key) + klog.V(5).Infof("Reconciling namespace event for %s ", eventData.name()) defer func() { - klog.V(5).Infof("Finished syncing Namespace %s Network QoS controller: took %v", key, time.Since(startTime)) + klog.V(5).Infof("Finished reconciling namespace %s, took %v", eventData.name(), time.Since(startTime)) }() - namespace, err := c.nqosNamespaceLister.Get(key) - if err != nil && !apierrors.IsNotFound(err) { + nqosNames, err := c.getNetworkQosForNamespaceChange(eventData) + if err != nil { return err } - // (i) namespace add - // (ii) namespace update because namespace's labels changed - // (iii) namespace delete - // case (iii) - if namespace == nil { - for _, cachedKey := range c.nqosCache.GetKeys() { - err := c.nqosCache.DoWithLock(cachedKey, func(nqosKey string) error { - if nqosObj, _ := c.nqosCache.Load(nqosKey); nqosObj != nil { - return c.clearNamespaceForNQOS(key, nqosObj) - } - return nil - }) - if err != nil { - return err - } - } - recordNamespaceReconcileDuration(c.controllerName, time.Since(startTime).Milliseconds()) - return nil - } - // case (i)/(ii) - for _, cachedKey := range c.nqosCache.GetKeys() { - err := c.nqosCache.DoWithLock(cachedKey, func(nqosKey string) error { - if nqosObj, _ := c.nqosCache.Load(nqosKey); nqosObj != nil { - return c.setNamespaceForNQOS(namespace, nqosObj) - } else { - klog.Warningf("NetworkQoS not synced yet: %s", nqosKey) - // requeue nqos key to sync it - c.nqosQueue.Add(nqosKey) - // requeue namespace key 3 seconds later, allow NetworkQoS to be handled - c.nqosNamespaceQueue.AddAfter(key, 3*time.Second) - return nil - } - }) - if err != nil { - return err - } + for nqosName := range nqosNames { + c.nqosQueue.Add(nqosName) } recordNamespaceReconcileDuration(c.controllerName, time.Since(startTime).Milliseconds()) return nil } -// clearNamespaceForNQOS will handle the logic for figuring out if the provided namespace name -// has pods that affect address sets of the cached network qoses. If so, remove them. 
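The namespace worker above follows the standard client-go retry discipline: rate-limited requeues up to maxRetries, then HandleError and Forget so the per-item failure counter resets. A generic sketch of that loop, assuming a typed workqueue (the retry limit here is illustrative, not the controller's actual constant):

package networkqos

import (
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/util/workqueue"
)

const sketchMaxRetries = 15 // assumed; the controller defines its own limit

// processNext drains one item from a typed workqueue, retrying failed items
// with backoff and dropping them after too many attempts.
func processNext[T comparable](queue workqueue.TypedRateLimitingInterface[T], sync func(T) error) bool {
	item, shutdown := queue.Get()
	if shutdown {
		return false
	}
	defer queue.Done(item)
	if err := sync(item); err != nil {
		if queue.NumRequeues(item) < sketchMaxRetries {
			queue.AddRateLimited(item) // backoff comes from the queue's rate limiter
			return true
		}
		utilruntime.HandleError(err) // give up on this item
	}
	queue.Forget(item) // reset the failure counter on success or final failure
	return true
}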
-func (c *Controller) clearNamespaceForNQOS(namespace string, nqosState *networkQoSState) error { - for _, rule := range nqosState.EgressRules { - if rule.Classifier == nil { +// getNetworkQosForNamespaceChange returns the set of NetworkQoS names that are affected by the namespace change +func (c *Controller) getNetworkQosForNamespaceChange(eventData *eventData[*corev1.Namespace]) (sets.Set[*nqosv1alpha1.NetworkQoS], error) { + networkQoSes := sets.Set[*nqosv1alpha1.NetworkQoS]{} + nqoses, err := c.getAllNetworkQoSes() + if err != nil { + return nil, err + } + for _, nqos := range nqoses { + ns := eventData.new + if ns == nil { + ns = eventData.old + } + // check if any network selector matches the namespace, or ns label change affects the network selection + if namespaceMatchesNetworkSelector(ns, nqos) || networkSelectionChanged(nqos, eventData.new, eventData.old) { + networkQoSes.Insert(nqos) continue } - for _, dest := range rule.Classifier.Destinations { - if err := dest.removePodsInNamespace(namespace); err != nil { - return fmt.Errorf("error removing IPs from dest address set %s: %v", dest.DestAddrSet.GetName(), err) - } + // check if any egress rule matches the namespace, or ns label change affects the egress selection + if namespaceMatchesEgressRule(ns, nqos) || egressSelectionChanged(nqos, eventData.new, eventData.old) { + networkQoSes.Insert(nqos) } } - return nil + return networkQoSes, nil } -// setNamespaceForNQOS will handle the logic for figuring out if the provided namespace name -// has pods that need to populate or removed from the address sets of the network qoses. -func (c *Controller) setNamespaceForNQOS(namespace *corev1.Namespace, nqosState *networkQoSState) error { - for _, rule := range nqosState.EgressRules { - if rule.Classifier == nil { +// namespaceMatchesNetworkSelector checks if the namespace matches any of the network selectors in the NetworkQoS +func namespaceMatchesNetworkSelector(namespace *corev1.Namespace, nqos *nqosv1alpha1.NetworkQoS) bool { + for _, selector := range nqos.Spec.NetworkSelectors { + var nsSelector *metav1.LabelSelector + switch { + case selector.NetworkAttachmentDefinitionSelector != nil: + if selector.NetworkAttachmentDefinitionSelector.NamespaceSelector.Size() == 0 { + // namespace selector is empty, match all + return true + } + nsSelector = &selector.NetworkAttachmentDefinitionSelector.NamespaceSelector + /*case selector.PrimaryUserDefinedNetworkSelector != nil: + if selector.PrimaryUserDefinedNetworkSelector.NamespaceSelector.Size() == 0 { + // namespace selector is empty, match all + return true + } + nsSelector = &selector.PrimaryUserDefinedNetworkSelector.NamespaceSelector + case selector.SecondaryUserDefinedNetworkSelector != nil: + if selector.SecondaryUserDefinedNetworkSelector.NamespaceSelector.Size() == 0 { + // namespace selector is empty, match all + return true + } + nsSelector = &selector.SecondaryUserDefinedNetworkSelector.NamespaceSelector + */ + } + if nsSelector == nil { continue } - for index, dest := range rule.Classifier.Destinations { - if dest.PodSelector == nil && dest.NamespaceSelector == nil { - // no selectors, no address set - continue + if ls, err := metav1.LabelSelectorAsSelector(nsSelector); err != nil { + klog.Errorf("%s/%s - failed to convert namespace selector %s : %v", nqos.Namespace, nqos.Name, nsSelector.String(), err) + } else if ls != nil && ls.Matches(labels.Set(namespace.Labels)) { + return true + } + } + return false +} + +func namespaceMatchesEgressRule(namespace *corev1.Namespace, nqos 
*nqosv1alpha1.NetworkQoS) bool { + for _, egress := range nqos.Spec.Egress { + for _, dest := range egress.Classifier.To { + if dest.NamespaceSelector == nil || dest.NamespaceSelector.Size() == 0 { + // namespace selector is empty, match all + return true } - if !dest.matchNamespace(namespace, nqosState.namespace) { - if err := dest.removePodsInNamespace(namespace.Name); err != nil { - return fmt.Errorf("error removing pods in namespace %s from NetworkQoS %s/%s rule %d: %v", namespace.Name, nqosState.namespace, nqosState.name, index, err) - } + if ls, err := metav1.LabelSelectorAsSelector(dest.NamespaceSelector); err != nil { + klog.Errorf("%s/%s - failed to convert egress namespace selector %s: %v", nqos.Namespace, nqos.Name, dest.NamespaceSelector.String(), err) + } else if ls != nil && ls.Matches(labels.Set(namespace.Labels)) { + return true + } + } + } + return false +} + +// check if namespace change causes the network selection change +func networkSelectionChanged(nqos *nqosv1alpha1.NetworkQoS, new *corev1.Namespace, old *corev1.Namespace) bool { + for _, selector := range nqos.Spec.NetworkSelectors { + var nsSelector *metav1.LabelSelector + switch selector.NetworkSelectionType { + /*case crdtypes.PrimaryUserDefinedNetworks: + if selector.PrimaryUserDefinedNetworkSelector != nil { + nsSelector = &selector.PrimaryUserDefinedNetworkSelector.NamespaceSelector + } + case crdtypes.SecondaryUserDefinedNetworks: + if selector.SecondaryUserDefinedNetworkSelector != nil { + nsSelector = &selector.SecondaryUserDefinedNetworkSelector.NamespaceSelector + } + */ + case crdtypes.NetworkAttachmentDefinitions: + if selector.NetworkAttachmentDefinitionSelector != nil { + nsSelector = &selector.NetworkAttachmentDefinitionSelector.NamespaceSelector + } + } + if nsSelector == nil { + continue + } + if ls, err := metav1.LabelSelectorAsSelector(nsSelector); err != nil { + // namespace selector is not valid, skip this selector + klog.Errorf("%s/%s - failed to convert namespace selector %s: %v", nqos.Namespace, nqos.Name, nsSelector.String(), err) + } else if old != nil && new != nil { + return ls.Matches(labels.Set(old.Labels)) != ls.Matches(labels.Set(new.Labels)) + } + } + return false +} + +func egressSelectionChanged(nqos *nqosv1alpha1.NetworkQoS, new *corev1.Namespace, old *corev1.Namespace) bool { + for _, egress := range nqos.Spec.Egress { + for _, dest := range egress.Classifier.To { + if dest.NamespaceSelector == nil || dest.NamespaceSelector.Size() == 0 { + // empty namespace selector won't make difference continue } - // add matching pods in the namespace to dest - if err := dest.addPodsInNamespace(c, namespace.Name); err != nil { - return err + if nsSelector, err := metav1.LabelSelectorAsSelector(dest.NamespaceSelector); err != nil { + klog.Errorf("Failed to convert namespace selector in %s/%s: %v", nqos.Namespace, nqos.Name, err) + } else if old != nil && new != nil { + return nsSelector.Matches(labels.Set(old.Labels)) != nsSelector.Matches(labels.Set(new.Labels)) } - klog.V(5).Infof("Added pods in namespace %s for NetworkQoS %s/%s rule %d", namespace.Name, nqosState.namespace, nqosState.name, index) } } - return nil + return false } diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go index fed2f8dbbf..d8ac3d0925 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go @@ -6,13 +6,10 @@ import ( "time" corev1 
"k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -41,121 +38,31 @@ func (c *Controller) processNextNQOSNodeWorkItem(wg *sync.WaitGroup) bool { return true } -// syncNetworkQoSNode decides the main logic everytime -// we dequeue a key from the nqosNodeQueue cache +// syncNetworkQoSNode triggers resync of all the NetworkQoSes when a node moves in/out of local zone func (c *Controller) syncNetworkQoSNode(key string) error { startTime := time.Now() - _, name, err := cache.SplitMetaNamespaceKey(key) + _, nodeName, err := cache.SplitMetaNamespaceKey(key) if err != nil { return err } - klog.V(5).Infof("Processing sync for Node %s in Network QoS controller", name) + klog.V(5).Infof("Processing sync for Node %s in Network QoS controller", nodeName) defer func() { - klog.V(5).Infof("Finished syncing Node %s Network QoS controller: took %v", name, time.Since(startTime)) + klog.V(5).Infof("Finished syncing Node %s Network QoS controller: took %v", nodeName, time.Since(startTime)) }() - node, err := c.nqosNodeLister.Get(name) - if err != nil && !apierrors.IsNotFound(err) { - return err - } - - if !c.isNodeInLocalZone(node) && c.TopologyType() == types.Layer3Topology { - // clean up qos/address set for the node - return c.cleanupQoSFromNode(node.Name) - } - // configure qos for pods on the node - pods, err := c.getPodsByNode(node.Name) - if err != nil { - return err - } - switchName := c.getLogicalSwitchName(node.Name) - _, err = c.findLogicalSwitch(switchName) - if err != nil { - klog.V(4).Infof("Failed to look up logical switch %s: %v", switchName, err) - return err - } - for _, cachedKey := range c.nqosCache.GetKeys() { - err := c.nqosCache.DoWithLock(cachedKey, func(nqosKey string) error { - nqosObj, _ := c.nqosCache.Load(nqosKey) - if nqosObj == nil { - klog.Warningf("NetworkQoS not synced yet: %s", nqosKey) - // requeue nqos key to sync it - c.nqosQueue.Add(nqosKey) - // requeue namespace key 3 seconds later, allow NetworkQoS to be handled - c.nqosNamespaceQueue.AddAfter(key, 3*time.Second) - return nil - } - for _, pod := range pods { - ns, err := c.nqosNamespaceLister.Get(pod.Namespace) - if err != nil { - return fmt.Errorf("failed to look up namespace %s: %w", pod.Namespace, err) - } - if err = c.setPodForNQOS(pod, nqosObj, ns); err != nil { - return err - } - } - return nil - }) - if err != nil { - return err + // node moves in/out of local zone, resync all the NetworkQoSes + for _, nqosName := range c.nqosCache.GetKeys() { + ns, name, _ := cache.SplitMetaNamespaceKey(nqosName) + if nqos, err := c.nqosLister.NetworkQoSes(ns).Get(name); err != nil { + klog.Errorf("Failed to get NetworkQoS %s: %v", nqosName, err) + } else if nqos != nil { + c.nqosQueue.Add(nqos) } } return nil } -func (c *Controller) getPodsByNode(nodeName string) ([]*corev1.Pod, error) { - pods, err := c.nqosPodLister.List(labels.Everything()) - if err != nil { - return nil, fmt.Errorf("failed to list pods: %w", err) - } - podsByNode := []*corev1.Pod{} - for _, pod := range pods { - if util.PodScheduled(pod) && !util.PodWantsHostNetwork(pod) && pod.Spec.NodeName == nodeName { - podsByNode = append(podsByNode, pod) - } - } - return podsByNode, nil -} - // isNodeInLocalZone returns whether the provided node is in a zone local to the 
zone controller func (c *Controller) isNodeInLocalZone(node *corev1.Node) bool { return util.GetNodeZone(node) == c.zone } - -func (c *Controller) cleanupQoSFromNode(nodeName string) error { - switchName := c.getLogicalSwitchName(nodeName) - for _, cachedKey := range c.nqosCache.GetKeys() { - err := c.nqosCache.DoWithLock(cachedKey, func(nqosKey string) error { - nqosObj, _ := c.nqosCache.Load(nqosKey) - if nqosObj == nil { - klog.V(4).Infof("Expected networkqos %s not found in cache", nqosKey) - return nil - } - pods := []string{} - if val, _ := nqosObj.SwitchRefs.Load(switchName); val != nil { - pods = val.([]string) - } - for _, pod := range pods { - addrs, _ := nqosObj.Pods.Load(pod) - if addrs != nil { - err := nqosObj.SrcAddrSet.DeleteAddresses(addrs.([]string)) - if err != nil { - return err - } - } - nqosObj.Pods.Delete(pod) - } - err := c.removeQoSFromLogicalSwitches(nqosObj, []string{switchName}) - if err != nil { - return err - } - nqosObj.SwitchRefs.Delete(switchName) - return nil - }) - if err != nil { - return err - } - klog.V(4).Infof("Successfully cleaned up qos rules %s from %s", cachedKey, switchName) - } - return nil -} diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go index 03bf519ebd..82eed9b07e 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go @@ -145,7 +145,7 @@ func (c *Controller) cleanupStaleOvnObjects(qosState *networkQoSState) error { if _, qosInUse := qosState.SwitchRefs.Load(ls.Name); indexWithinRange && qosInUse { continue } - qosList := staleSwitchQoSMap[ls.UUID] + qosList := staleSwitchQoSMap[ls.Name] if qosList == nil { qosList = []*nbdb.QoS{} } diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go index 440f25f4b4..44ac375b6c 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go @@ -7,142 +7,55 @@ import ( "time" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/tools/cache" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + nqosv1alpha1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" ) func (c *Controller) processNextNQOSPodWorkItem(wg *sync.WaitGroup) bool { wg.Add(1) defer wg.Done() - nqosPodKey, quit := c.nqosPodQueue.Get() - if quit { + eventData, shutdown := c.nqosPodQueue.Get() + if shutdown { return false } - defer c.nqosPodQueue.Done(nqosPodKey) - err := c.syncNetworkQoSPod(nqosPodKey) - if err == nil { - c.nqosPodQueue.Forget(nqosPodKey) - return true - } - - utilruntime.HandleError(fmt.Errorf("%v failed with: %v", nqosPodKey, err)) + defer c.nqosPodQueue.Done(eventData) - if c.nqosPodQueue.NumRequeues(nqosPodKey) < maxRetries { - c.nqosPodQueue.AddRateLimited(nqosPodKey) - return true + if err := c.syncNetworkQoSPod(eventData); err != nil { + if c.nqosPodQueue.NumRequeues(eventData) < maxRetries { + c.nqosPodQueue.AddRateLimited(eventData) + return true + } + klog.Errorf("%s: Failed to reconcile pod %s/%s: %v", c.controllerName, eventData.namespace(), eventData.name(), err) + 
utilruntime.HandleError(fmt.Errorf("failed to reconcile pod %s/%s: %v", eventData.namespace(), eventData.name(), err)) } - - c.nqosPodQueue.Forget(nqosPodKey) + c.nqosPodQueue.Forget(eventData) return true } // syncNetworkQoSPod decides the main logic everytime // we dequeue a key from the nqosPodQueue cache -func (c *Controller) syncNetworkQoSPod(key string) error { +func (c *Controller) syncNetworkQoSPod(eventData *eventData[*corev1.Pod]) error { startTime := time.Now() - // Iterate all NQOses and check if this namespace start/stops matching - // any NQOS and add/remove the setup accordingly. Namespaces can match multiple - // NQOses objects, so continue iterating all NQOS objects before finishing. - namespace, name, err := cache.SplitMetaNamespaceKey(key) + networkQoSes, err := c.getNetworkQosForPodChange(eventData) if err != nil { return err } - klog.V(5).Infof("Processing sync for Pod %s/%s in Network QoS controller", namespace, name) - - defer func() { - klog.V(5).Infof("Finished syncing Pod %s/%s Network QoS controller: took %v", namespace, name, time.Since(startTime)) - }() - ns, err := c.nqosNamespaceLister.Get(namespace) - if err != nil { - return err - } - podNamespaceLister := c.nqosPodLister.Pods(namespace) - pod, err := podNamespaceLister.Get(name) - if err != nil && !apierrors.IsNotFound(err) { - return err - } - // (i) pod add - // (ii) pod update because LSP and IPAM is done OR pod's labels changed - // (iii) pod update because pod went into completed state - // (iv) pod delete - // case(iii)/(iv) - if pod == nil || util.PodCompleted(pod) { - for _, cachedKey := range c.nqosCache.GetKeys() { - err := c.nqosCache.DoWithLock(cachedKey, func(nqosKey string) error { - if nqosObj, _ := c.nqosCache.Load(nqosKey); nqosObj != nil { - return c.clearPodForNQOS(namespace, name, nqosObj) - } - return nil - }) - if err != nil { - return err - } - } - recordPodReconcileDuration(c.controllerName, time.Since(startTime).Milliseconds()) - return nil - } - // We don't want to shortcut only local zone pods here since peer pods - // whether local or remote need to be dealt with. 
So we let the main - // NQOS controller take care of the local zone pods logic for the policy subjects - if !util.PodScheduled(pod) || util.PodWantsHostNetwork(pod) { - // we don't support NQOS with host-networked pods - // if pod is no scheduled yet, return and we can process it on its next update - // because anyways at that stage pod is considered to belong to remote zone - return nil - } - // case (i)/(ii) - for _, cachedKey := range c.nqosCache.GetKeys() { - err := c.nqosCache.DoWithLock(cachedKey, func(nqosKey string) error { - if nqosObj, _ := c.nqosCache.Load(nqosKey); nqosObj != nil { - return c.setPodForNQOS(pod, nqosObj, ns) - } else { - klog.Warningf("NetworkQoS not synced yet: %s", nqosKey) - // requeue nqos key to sync it - c.nqosQueue.Add(nqosKey) - // requeue pod key in 3 sec - c.nqosPodQueue.AddAfter(key, 3*time.Second) - } - return nil - }) - if err != nil { - return err - } + for nqos := range networkQoSes { + c.nqosQueue.Add(nqos) } recordPodReconcileDuration(c.controllerName, time.Since(startTime).Milliseconds()) return nil } -// clearPodForNQOS will handle the logic for figuring out if the provided pod name -func (c *Controller) clearPodForNQOS(namespace, name string, nqosState *networkQoSState) error { - fullPodName := joinMetaNamespaceAndName(namespace, name) - if err := nqosState.removePodFromSource(c, fullPodName, nil); err != nil { - return err - } - // remove pod from destination address set - for _, rule := range nqosState.EgressRules { - if rule.Classifier == nil { - continue - } - for _, dest := range rule.Classifier.Destinations { - if dest.PodSelector == nil && dest.NamespaceSelector == nil { - continue - } - if err := dest.removePod(fullPodName, nil); err != nil { - return err - } - } - } - return nil -} - // setPodForNQOS will check if the pod meets source selector or dest selector // - match source: add the ip to source address set, bind qos rule to the switch // - match dest: add the ip to the destination address set -func (c *Controller) setPodForNQOS(pod *corev1.Pod, nqosState *networkQoSState, namespace *corev1.Namespace) error { +func (c *Controller) setPodForNQOS(pod *corev1.Pod, nqosState *networkQoSState, namespace *corev1.Namespace, addressSetMap map[string]sets.Set[string]) error { addresses, err := getPodAddresses(pod, c.NetInfo) if err == nil && len(addresses) == 0 { // pod either is not attached to this network, or hasn't been annotated with addresses yet, return without retry @@ -156,7 +69,9 @@ func (c *Controller) setPodForNQOS(pod *corev1.Pod, nqosState *networkQoSState, if c.isPodScheduledinLocalZone(pod) { if matchSource := nqosState.matchSourceSelector(pod); matchSource { // pod's labels match source selector - err = nqosState.configureSourcePod(c, pod, addresses) + if err = nqosState.configureSourcePod(c, pod, addresses); err == nil { + populateAddresses(addressSetMap, nqosState.SrcAddrSet.GetName(), addresses) + } } else { // pod's labels don't match selector, but it probably matched previously err = nqosState.removePodFromSource(c, fullPodName, addresses) @@ -171,10 +86,10 @@ func (c *Controller) setPodForNQOS(pod *corev1.Pod, nqosState *networkQoSState, return err } } - return reconcilePodForDestinations(nqosState, namespace, pod, addresses) + return reconcilePodForDestinations(nqosState, namespace, pod, addresses, addressSetMap) } -func reconcilePodForDestinations(nqosState *networkQoSState, podNs *corev1.Namespace, pod *corev1.Pod, addresses []string) error { +func reconcilePodForDestinations(nqosState *networkQoSState, podNs 
*corev1.Namespace, pod *corev1.Pod, addresses []string, addressSetMap map[string]sets.Set[string]) error { fullPodName := joinMetaNamespaceAndName(pod.Namespace, pod.Name) for _, rule := range nqosState.EgressRules { for index, dest := range rule.Classifier.Destinations { @@ -186,6 +101,7 @@ func reconcilePodForDestinations(nqosState *networkQoSState, podNs *corev1.Names if err := dest.addPod(pod.Namespace, pod.Name, addresses); err != nil { return fmt.Errorf("failed to add addresses {%s} to dest address set %s for NetworkQoS %s/%s, rule index %d: %v", strings.Join(addresses, ","), dest.DestAddrSet.GetName(), nqosState.namespace, nqosState.name, index, err) } + populateAddresses(addressSetMap, dest.DestAddrSet.GetName(), addresses) } else { // no match, remove the pod if it's previously selected if err := dest.removePod(fullPodName, addresses); err != nil { @@ -196,3 +112,126 @@ func reconcilePodForDestinations(nqosState *networkQoSState, podNs *corev1.Names } return nil } + +func (c *Controller) getNetworkQosForPodChange(eventData *eventData[*corev1.Pod]) (sets.Set[*nqosv1alpha1.NetworkQoS], error) { + var pod *corev1.Pod + if eventData.new != nil { + pod = eventData.new + } else { + pod = eventData.old + } + podNs, err := c.nqosNamespaceLister.Get(pod.Namespace) + if err != nil { + return nil, fmt.Errorf("failed to get namespace %s: %v", pod.Namespace, err) + } + nqoses, err := c.getAllNetworkQoSes() + if err != nil { + return nil, err + } + affectedNetworkQoSes := sets.Set[*nqosv1alpha1.NetworkQoS]{} + for _, nqos := range nqoses { + if podMatchesSourceSelector(pod, nqos) { + affectedNetworkQoSes.Insert(nqos) + continue + } + // check if pod matches any egress + for _, egress := range nqos.Spec.Egress { + if podMatchesEgressSelector(podNs, pod, nqos, &egress) { + affectedNetworkQoSes.Insert(nqos) + continue + } + } + if podSelectionChanged(nqos, eventData.new, eventData.old) { + affectedNetworkQoSes.Insert(nqos) + } + } + return affectedNetworkQoSes, nil +} + +func podMatchesSourceSelector(pod *corev1.Pod, nqos *nqosv1alpha1.NetworkQoS) bool { + if nqos.Namespace != pod.Namespace { + return false + } + if nqos.Spec.PodSelector.Size() == 0 { + return true + } + podSelector, err := metav1.LabelSelectorAsSelector(&nqos.Spec.PodSelector) + if err != nil { + klog.Errorf("Failed to convert pod selector in %s/%s: %v", nqos.Namespace, nqos.Name, err) + return false + } + return podSelector.Matches(labels.Set(pod.Labels)) +} + +func podMatchesEgressSelector(podNs *corev1.Namespace, pod *corev1.Pod, nqos *nqosv1alpha1.NetworkQoS, egress *nqosv1alpha1.Rule) bool { + var nsSelector labels.Selector + var podSelector labels.Selector + var err error + match := false + for _, dest := range egress.Classifier.To { + if dest.NamespaceSelector != nil { + if nsSelector, err = metav1.LabelSelectorAsSelector(dest.NamespaceSelector); err != nil { + klog.Errorf("Failed to convert namespace selector in %s/%s: %v", nqos.Namespace, nqos.Name, err) + continue + } + } + if dest.PodSelector != nil { + if podSelector, err = metav1.LabelSelectorAsSelector(dest.PodSelector); err != nil { + klog.Errorf("Failed to convert pod selector in %s/%s: %v", nqos.Namespace, nqos.Name, err) + continue + } + } + switch { + case nsSelector != nil && podSelector != nil: + match = nsSelector.Matches(labels.Set(podNs.Labels)) && podSelector.Matches(labels.Set(pod.Labels)) + case nsSelector == nil && podSelector != nil: + match = pod.Namespace == nqos.Namespace && podSelector.Matches(labels.Set(pod.Labels)) + case nsSelector != nil && 
podSelector == nil: + match = nsSelector.Matches(labels.Set(podNs.Labels)) + default: //nsSelector == nil && podSelector == nil: + match = false + } + if match { + return true + } + } + return false +} + +func podSelectionChanged(nqos *nqosv1alpha1.NetworkQoS, new *corev1.Pod, old *corev1.Pod) bool { + if new == nil || old == nil { + return false + } + if nqos.Spec.PodSelector.Size() > 0 { + if podSelector, err := metav1.LabelSelectorAsSelector(&nqos.Spec.PodSelector); err != nil { + klog.Errorf("Failed to convert pod selector in %s/%s: %v", nqos.Namespace, nqos.Name, err) + } else if podSelector.Matches(labels.Set(new.Labels)) != podSelector.Matches(labels.Set(old.Labels)) { + return true + } + } + for _, egress := range nqos.Spec.Egress { + for _, dest := range egress.Classifier.To { + if dest.PodSelector == nil { + continue + } + if podSelector, err := metav1.LabelSelectorAsSelector(dest.PodSelector); err != nil { + klog.Errorf("Failed to convert pod selector in %s/%s: %v", nqos.Namespace, nqos.Name, err) + } else if podSelector.Matches(labels.Set(new.Labels)) != podSelector.Matches(labels.Set(old.Labels)) { + return true + } + } + } + return false +} + +func populateAddresses(addressSetMap map[string]sets.Set[string], name string, addresses []string) { + if len(addresses) == 0 { + return + } + addressSet := addressSetMap[name] + if addressSet == nil { + addressSet = sets.New[string]() + } + addressSet.Insert(addresses...) + addressSetMap[name] = addressSet +} diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go index 83162e13a8..fd3bf8f6f3 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go @@ -263,13 +263,13 @@ func tableEntrySetup(enableInterconnect bool) { fakeNQoSClient = ovnClientset.NetworkQoSClient initEnv(ovnClientset, initialDB) // init controller for default network - initNetworkQoSController(&util.DefaultNetInfo{}, defaultAddrsetFactory, defaultControllerName) + initNetworkQoSController(&util.DefaultNetInfo{}, defaultAddrsetFactory, defaultControllerName, enableInterconnect) // init controller for stream nad streamImmutableNadInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) streamNadInfo := util.NewMutableNetInfo(streamImmutableNadInfo) streamNadInfo.AddNADs("default/stream") - initNetworkQoSController(streamNadInfo, streamAddrsetFactory, streamControllerName) + initNetworkQoSController(streamNadInfo, streamAddrsetFactory, streamControllerName, enableInterconnect) } var _ = AfterEach(func() { @@ -361,7 +361,7 @@ var _ = Describe("NetworkQoS Controller", func() { return err.Error() } return qos.Match - }).WithTimeout(5 * time.Second).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || (ip4.dst == 128.116.0.0/17 && ip4.dst != {128.116.0.0,128.116.0.255})) && tcp && tcp.dst == {8080,8081}", srcHashName4, dst1HashName4))) + }).WithTimeout(10 * time.Second).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || (ip4.dst == 128.116.0.0/17 && ip4.dst != {128.116.0.0,128.116.0.255})) && tcp && tcp.dst == {8080,8081}", srcHashName4, dst1HashName4))) dst3AddrSet, err3 := findAddressSet(defaultAddrsetFactory, nqosNamespace, nqosName, "1", "0", defaultControllerName) Expect(err3).NotTo(HaveOccurred()) @@ -372,7 +372,7 @@ var _ = Describe("NetworkQoS Controller", func() { return err.Error() } return qos.Match - }).WithTimeout(5 * 
time.Second).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || ip4.dst == 128.118.0.0/17) && ((tcp && tcp.dst == {8080,8081}) || (udp && udp.dst == {9090,8080}))", srcHashName4, dst3HashName4))) + }).WithTimeout(10 * time.Second).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && (ip4.dst == {$%s} || (ip4.dst == 128.118.0.0/17 && ip4.dst != {128.118.0.0,128.118.0.255})) && ((tcp && tcp.dst == {8080,8081}) || (udp && udp.dst == {9090,8080}))", srcHashName4, dst3HashName4))) } By("removes IP from destination address set if pod's labels don't match the selector") @@ -554,6 +554,31 @@ var _ = Describe("NetworkQoS Controller", func() { eventuallyExpectNoQoS(defaultControllerName, nqosNamespace, "stream-qos", 0) } + By("will not populate source address set NetworkQos with incorrect namespace selector in spec") + { + nqos4StreamNet.Spec.NetworkSelectors = []crdtypes.NetworkSelector{ + { + NetworkSelectionType: crdtypes.NetworkAttachmentDefinitions, + NetworkAttachmentDefinitionSelector: &crdtypes.NetworkAttachmentDefinitionSelector{ + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": "unknown", + }, + }, + NetworkSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": "stream", + }, + }, + }, + }, + } + nqos4StreamNet.ResourceVersion = time.Now().String() + _, err := fakeNQoSClient.K8sV1alpha1().NetworkQoSes(nqosNamespace).Update(context.TODO(), nqos4StreamNet, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + eventuallyAddressSetHasNo(streamAddrsetFactory, nqosNamespace, "stream-qos", "src", "0", streamControllerName, "10.128.2.3") + } + By("handles NetworkQos on secondary network") { nqos4StreamNet.Spec.NetworkSelectors = []crdtypes.NetworkSelector{ @@ -606,7 +631,7 @@ var _ = Describe("NetworkQoS Controller", func() { CIDR: "128.115.0.0/17", Except: []string{ "128.115.0.0", - "128.115.0.255", + "123.123.123.123", }, }, }, @@ -620,7 +645,7 @@ var _ = Describe("NetworkQoS Controller", func() { Expect(err).NotTo(HaveOccurred()) qos := eventuallyExpectQoS(defaultControllerName, nqosNamespace, "no-source-selector", 0) v4HashName, _ := addrset.GetASHashNames() - Expect(qos.Match).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && ip4.dst == 128.115.0.0/17 && ip4.dst != {128.115.0.0,128.115.0.255}", v4HashName))) + Expect(qos.Match).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && ip4.dst == 128.115.0.0/17 && ip4.dst != {128.115.0.0,123.123.123.123}", v4HashName))) } By("clear QoS attributes of existing NetworkQoS and make sure that is proper") @@ -671,7 +696,7 @@ var _ = Describe("NetworkQoS Controller", func() { Expect(err).NotTo(HaveOccurred()) Expect(qos).NotTo(BeNil()) return qos.Priority == 10010 && len(qos.Bandwidth) == 0 - }).WithTimeout(5 * time.Second).WithPolling(1 * time.Second).Should(BeTrue()) + }).WithTimeout(10 * time.Second).WithPolling(1 * time.Second).Should(BeTrue()) Expect(qos.Match).Should(Equal(fmt.Sprintf("ip4.src == {$%s} && ip4.dst == 128.115.0.0/17 && ip4.dst != {128.115.0.0,123.123.123.123}", v4HashName))) } @@ -736,7 +761,7 @@ var _ = Describe("NetworkQoS Controller", func() { Expect(err).NotTo(HaveOccurred()) localnetNadInfo := util.NewMutableNetInfo(localnetImmutableNadInfo) localnetNadInfo.AddNADs("default/netwk1") - ctrl := initNetworkQoSController(localnetNadInfo, addressset.NewFakeAddressSetFactory("netwk1-controller"), "netwk1-controller") + ctrl := initNetworkQoSController(localnetNadInfo, addressset.NewFakeAddressSetFactory("netwk1-controller"), "netwk1-controller", enableInterconnect) lsName 
:= ctrl.getLogicalSwitchName("dummy") Expect(lsName).To(Equal("netwk1_ovn_localnet_switch")) } @@ -748,7 +773,7 @@ var _ = Describe("NetworkQoS Controller", func() { Expect(err).NotTo(HaveOccurred()) layer2NadInfo := util.NewMutableNetInfo(layer2ImmutableNadInfo) layer2NadInfo.AddNADs("default/netwk2") - ctrl := initNetworkQoSController(layer2NadInfo, addressset.NewFakeAddressSetFactory("netwk2-controller"), "netwk2-controller") + ctrl := initNetworkQoSController(layer2NadInfo, addressset.NewFakeAddressSetFactory("netwk2-controller"), "netwk2-controller", enableInterconnect) lsName := ctrl.getLogicalSwitchName("dummy") Expect(lsName).To(Equal("netwk2_ovn_layer2_switch")) } @@ -763,7 +788,7 @@ func eventuallyExpectAddressSet(addrsetFactory addressset.AddressSetFactory, nqo Eventually(func() bool { addrset, _ := findAddressSet(addrsetFactory, nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex, controllerName) return addrset != nil - }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("address set not found for %s/%s, rule %s, address block %s", nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex)) + }).WithTimeout(10*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("address set not found for %s/%s, rule %s, address block %s", nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex)) } func eventuallyAddressSetHas(addrsetFactory addressset.AddressSetFactory, nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex, controllerName, ip string) { @@ -774,7 +799,7 @@ func eventuallyAddressSetHas(addrsetFactory addressset.AddressSetFactory, nqosNa } ip4, _ := addrset.GetAddresses() return slices.Contains(ip4, ip) - }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("address set does not contain expected ip %s", ip)) + }).WithTimeout(10*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("address set does not contain expected ip %s", ip)) } func eventuallyAddressSetHasNo(addrsetFactory addressset.AddressSetFactory, nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex, controllerName, ip string) { @@ -785,7 +810,7 @@ func eventuallyAddressSetHasNo(addrsetFactory addressset.AddressSetFactory, nqos } ip4, _ := addrset.GetAddresses() return !slices.Contains(ip4, ip) - }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("address set still has unexpected ip %s", ip)) + }).WithTimeout(10*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("address set still has unexpected ip %s", ip)) } func findAddressSet(addrsetFactory addressset.AddressSetFactory, nqosNamespace, nqosName, qosRuleIndex, ipBlockIndex, controllerName string) (addressset.AddressSet, error) { @@ -798,7 +823,7 @@ func eventuallyExpectQoS(controllerName, qosNamespace, qosName string, index int Eventually(func() bool { qos, _ = findQoS(controllerName, qosNamespace, qosName, index) return qos != nil - }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("QoS not found for %s/%s", qosNamespace, qosName)) + }).WithTimeout(10*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("QoS not found for %s/%s", qosNamespace, qosName)) return qos } @@ -807,7 +832,7 @@ func eventuallyExpectNoQoS(controllerName, qosNamespace, qosName string, index i Eventually(func() bool { qos, _ = findQoS(controllerName, qosNamespace, qosName, index) return qos == nil - }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("Unexpected QoS found 
for %s/%s, index %d", qosNamespace, qosName, index)) + }).WithTimeout(10*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("Unexpected QoS found for %s/%s, index %d", qosNamespace, qosName, index)) } func findQoS(controllerName, qosNamespace, qosName string, index int) (*nbdb.QoS, error) { @@ -839,7 +864,7 @@ func eventuallySwitchHasQoS(switchName string, qos *nbdb.QoS) { } ls, _ = libovsdbops.GetLogicalSwitch(nbClient, criteria) return ls != nil && slices.Contains(ls.QOSRules, qos.UUID) - }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("QoS rule %s not found in switch %s", qos.UUID, switchName)) + }).WithTimeout(10*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("QoS rule %s not found in switch %s", qos.UUID, switchName)) } func eventuallySwitchHasNoQoS(switchName string, qos *nbdb.QoS) { @@ -850,7 +875,7 @@ func eventuallySwitchHasNoQoS(switchName string, qos *nbdb.QoS) { } ls, _ = libovsdbops.GetLogicalSwitch(nbClient, criteria) return ls != nil && !slices.Contains(ls.QOSRules, qos.UUID) - }).WithTimeout(5*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("Unexpected QoS rule %s found in switch %s", qos.UUID, switchName)) + }).WithTimeout(10*time.Second).WithPolling(1*time.Second).Should(BeTrue(), fmt.Sprintf("Unexpected QoS rule %s found in switch %s", qos.UUID, switchName)) } func initEnv(clientset *util.OVNClientset, initialDB *libovsdbtest.TestSetup) { @@ -888,7 +913,7 @@ func initEnv(clientset *util.OVNClientset, initialDB *libovsdbtest.TestSetup) { streamAddrsetFactory = addressset.NewFakeAddressSetFactory("stream-network-controller") } -func initNetworkQoSController(netInfo util.NetInfo, addrsetFactory addressset.AddressSetFactory, controllerName string) *Controller { +func initNetworkQoSController(netInfo util.NetInfo, addrsetFactory addressset.AddressSetFactory, controllerName string, enableInterconnect bool) *Controller { nqosController, err := NewController( controllerName, netInfo, @@ -902,7 +927,7 @@ func initNetworkQoSController(netInfo util.NetInfo, addrsetFactory addressset.Ad watchFactory.NADInformer(), addrsetFactory, func(pod *corev1.Pod) bool { - return pod.Spec.NodeName == "node1" + return pod.Spec.NodeName == "node1" || !enableInterconnect }, "node1") Expect(err).NotTo(HaveOccurred()) err = watchFactory.Start() diff --git a/go-controller/pkg/ovn/controller/network_qos/repair.go b/go-controller/pkg/ovn/controller/network_qos/repair.go index edfbf17dd8..5c97326e60 100644 --- a/go-controller/pkg/ovn/controller/network_qos/repair.go +++ b/go-controller/pkg/ovn/controller/network_qos/repair.go @@ -1,10 +1,8 @@ package networkqos import ( - "fmt" "time" - "k8s.io/apimachinery/pkg/labels" "k8s.io/klog/v2" networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" @@ -20,9 +18,9 @@ func (c *Controller) repairNetworkQoSes() error { defer func() { klog.Infof("Repairing network qos took %v", time.Since(start)) }() - nqoses, err := c.nqosLister.List(labels.Everything()) + nqoses, err := c.getAllNetworkQoSes() if err != nil { - return fmt.Errorf("unable to list NetworkQoSes from the lister: %v", err) + return err } nqosMap := map[string]*networkqosapi.NetworkQoS{} for _, nqos := range nqoses { diff --git a/go-controller/pkg/ovn/controller/network_qos/types.go b/go-controller/pkg/ovn/controller/network_qos/types.go index b390c038a8..188d61e689 100644 --- a/go-controller/pkg/ovn/controller/network_qos/types.go +++ 
b/go-controller/pkg/ovn/controller/network_qos/types.go @@ -11,8 +11,8 @@ import ( corev1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" @@ -65,7 +65,6 @@ func (nqosState *networkQoSState) initAddressSets(addressSetFactory addressset.A if err != nil { return fmt.Errorf("failed to init source address set for %s/%s: %w", nqosState.namespace, nqosState.name, err) } - // ensure destination address sets for ruleIndex, rule := range nqosState.EgressRules { for destIndex, dest := range rule.Classifier.Destinations { @@ -191,6 +190,45 @@ func (nqosState *networkQoSState) getAddressSetHashNames() []string { return addrsetNames } +func (nqosState *networkQoSState) cleanupStaleAddresses(addressSetMap map[string]sets.Set[string]) error { + if nqosState.SrcAddrSet != nil { + addresses := addressSetMap[nqosState.SrcAddrSet.GetName()] + v4Addresses, _ := nqosState.SrcAddrSet.GetAddresses() + staleAddresses := []string{} + for _, address := range v4Addresses { + if !addresses.Has(address) { + staleAddresses = append(staleAddresses, address) + } + } + if len(staleAddresses) > 0 { + if err := nqosState.SrcAddrSet.DeleteAddresses(staleAddresses); err != nil { + return err + } + } + } + for _, egress := range nqosState.EgressRules { + for _, dest := range egress.Classifier.Destinations { + if dest.DestAddrSet == nil { + continue + } + addresses := addressSetMap[dest.DestAddrSet.GetName()] + v4Addresses, _ := dest.DestAddrSet.GetAddresses() + staleAddresses := []string{} + for _, address := range v4Addresses { + if !addresses.Has(address) { + staleAddresses = append(staleAddresses, address) + } + } + if len(staleAddresses) > 0 { + if err := dest.DestAddrSet.DeleteAddresses(staleAddresses); err != nil { + return err + } + } + } + } + return nil +} + type GressRule struct { Priority int Dscp int @@ -309,13 +347,6 @@ type Destination struct { NamespaceSelector labels.Selector } -func (dest *Destination) matchNamespace(podNs *corev1.Namespace, qosNamespace string) bool { - if dest.NamespaceSelector == nil { - return podNs.Name == qosNamespace - } - return dest.NamespaceSelector.Matches(labels.Set(podNs.Labels)) -} - func (dest *Destination) matchPod(podNs *corev1.Namespace, pod *corev1.Pod, qosNamespace string) bool { switch { case dest.NamespaceSelector != nil && dest.PodSelector != nil: @@ -352,47 +383,6 @@ func (dest *Destination) removePod(fullPodName string, addresses []string) error return nil } -func (dest *Destination) removePodsInNamespace(namespace string) error { - var err error - // check for pods in the namespace being cleared - dest.Pods.Range(func(key, _ any) bool { - fullPodName := key.(string) - nameParts := strings.Split(fullPodName, "/") - if nameParts[0] != namespace { - // pod's namespace doesn't match - return true - } - err = dest.removePod(fullPodName, nil) - return err == nil - }) - return err -} - -func (dest *Destination) addPodsInNamespace(ctrl *Controller, namespace string) error { - podSelector := labels.Everything() - if dest.PodSelector != nil { - podSelector = dest.PodSelector - } - pods, err := ctrl.nqosPodLister.Pods(namespace).List(podSelector) - if err != nil { - if apierrors.IsNotFound(err) || len(pods) == 0 { - return nil - } - return fmt.Errorf("failed to look up pods in ns %s: %v", namespace, err) - } - klog.V(5).Infof("Found %d pods in namespace %s by selector %s", len(pods), namespace, 
podSelector.String()) - for _, pod := range pods { - podAddresses, err := getPodAddresses(pod, ctrl.NetInfo) - if err != nil { - return fmt.Errorf("failed to parse IPs for pod %s/%s: %v", pod.Namespace, pod.Name, err) - } - if err := dest.addPod(pod.Namespace, pod.Name, podAddresses); err != nil { - return fmt.Errorf("failed to add addresses {%s} to address set %s: %v", strings.Join(podAddresses, ","), dest.DestAddrSet.GetName(), err) - } - } - return nil -} - func getQoSRulePriority(qosPriority, ruleIndex int) int { return 10000 + qosPriority*10 + ruleIndex } From a000184509b3578ab6aa7429734c34d282f09ef6 Mon Sep 17 00:00:00 2001 From: Xiaobin Qu Date: Wed, 30 Apr 2025 13:20:21 -0700 Subject: [PATCH 16/18] revert to use string key for networkqos Signed-off-by: Xiaobin Qu (cherry picked from commit 21fd64abc5df71396e574a5bc4eea9d25b0cf561) --- .../ovn/controller/network_qos/network_qos.go | 44 ++++++++++++++----- .../network_qos/network_qos_controller.go | 43 ++++++++---------- .../network_qos/network_qos_namespace.go | 8 ++-- .../network_qos/network_qos_node.go | 2 +- .../controller/network_qos/network_qos_pod.go | 16 +++---- 5 files changed, 63 insertions(+), 50 deletions(-) diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos.go b/go-controller/pkg/ovn/controller/network_qos/network_qos.go index 86e280ca93..1e05938026 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos.go @@ -16,6 +16,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" metaapplyv1 "k8s.io/client-go/applyconfigurations/meta/v1" + "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" "k8s.io/utils/ptr" @@ -29,38 +30,45 @@ import ( func (c *Controller) processNextNQOSWorkItem(wg *sync.WaitGroup) bool { wg.Add(1) defer wg.Done() - nqos, quit := c.nqosQueue.Get() + nqosKey, quit := c.nqosQueue.Get() if quit { return false } - defer c.nqosQueue.Done(nqos) + defer c.nqosQueue.Done(nqosKey) - if err := c.syncNetworkQoS(nqos); err != nil { - if c.nqosQueue.NumRequeues(nqos) < maxRetries { - c.nqosQueue.AddRateLimited(nqos) + if err := c.syncNetworkQoS(nqosKey); err != nil { + if c.nqosQueue.NumRequeues(nqosKey) < maxRetries { + c.nqosQueue.AddRateLimited(nqosKey) return true } - klog.Warningf("%s: Failed to reconcile NetworkQoS %s/%s: %v", c.controllerName, nqos.Namespace, nqos.Name, err) - utilruntime.HandleError(fmt.Errorf("failed to reconcile NetworkQoS %s/%s: %v", nqos.Namespace, nqos.Name, err)) + klog.Warningf("%s: Failed to reconcile NetworkQoS %s: %v", c.controllerName, nqosKey, err) + utilruntime.HandleError(fmt.Errorf("failed to reconcile NetworkQoS %s: %v", nqosKey, err)) } - c.nqosQueue.Forget(nqos) + c.nqosQueue.Forget(nqosKey) return true } // syncNetworkQoS decides the main logic everytime // we dequeue a key from the nqosQueue cache -func (c *Controller) syncNetworkQoS(nqos *networkqosapi.NetworkQoS) error { +func (c *Controller) syncNetworkQoS(key string) error { + nqosNamespace, nqosName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } startTime := time.Now() - key := joinMetaNamespaceAndName(nqos.Namespace, nqos.Name) defer func() { klog.V(5).Infof("%s - Finished reconciling NetworkQoS %s : %v", c.controllerName, key, time.Since(startTime)) }() klog.V(5).Infof("%s - reconciling NetworkQoS %s", c.controllerName, key) - if nqos.DeletionTimestamp != nil { + nqos, err := c.nqosLister.NetworkQoSes(nqosNamespace).Get(nqosName) + if err != nil && 
!apierrors.IsNotFound(err) { + return err + } + if nqos == nil || !nqos.DeletionTimestamp.IsZero() { klog.V(6).Infof("%s - NetworkQoS %s is being deleted.", c.controllerName, key) return c.nqosCache.DoWithLock(key, func(_ string) error { - return c.clearNetworkQos(nqos.Namespace, nqos.Name) + return c.clearNetworkQos(nqosNamespace, nqosName) }) } @@ -302,6 +310,18 @@ func (c *Controller) resyncPods(nqosState *networkQoSState) error { var cudnController = udnv1.SchemeGroupVersion.WithKind("ClusterUserDefinedNetwork") +// networkManagedByMe determines if any of the networks specified in the networkSelectors are managed by this controller. +// It returns true if: +// - Multi-network is disabled (nadLister is nil) and this is the default network controller +// - No selectors are provided and this is the default network controller +// - Any of the selected networks match one of these criteria: +// - The selector is for the default network and this is the default network controller +// - The selector is for cluster user defined networks (CUDNs) and any of the matching NADs are controlled by a CUDN +// - The selector is for network attachment definitions (NADs) and any of the matching NADs are managed by this controller +// +// Returns an error if: +// - Any of the network selectors are invalid or empty +// - There is an error listing network attachment definitions func (c *Controller) networkManagedByMe(networkSelectors crdtypes.NetworkSelectors) (bool, error) { // return c.IsDefault() if multi-network is disabled or no selectors is provided in spec if c.nadLister == nil || len(networkSelectors) == 0 { diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go index 59ff1969c4..10cf3b76c5 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go @@ -71,7 +71,7 @@ type Controller struct { nqosCache *syncmap.SyncMap[*networkQoSState] // queues for the CRDs where incoming work is placed to de-dup - nqosQueue workqueue.TypedRateLimitingInterface[*networkqosapi.NetworkQoS] + nqosQueue workqueue.TypedRateLimitingInterface[string] // cached access to nqos objects nqosLister networkqoslister.NetworkQoSLister nqosCacheSynced cache.InformerSynced @@ -155,8 +155,8 @@ func NewController( c.nqosLister = nqosInformer.Lister() c.nqosCacheSynced = nqosInformer.Informer().HasSynced c.nqosQueue = workqueue.NewTypedRateLimitingQueueWithConfig( - workqueue.NewTypedItemFastSlowRateLimiter[*networkqosapi.NetworkQoS](1*time.Second, 5*time.Second, 5), - workqueue.TypedRateLimitingQueueConfig[*networkqosapi.NetworkQoS]{Name: "networkQoS"}, + workqueue.NewTypedItemFastSlowRateLimiter[string](1*time.Second, 5*time.Second, 5), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "networkQoS"}, ) _, err := nqosInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onNQOSAdd, @@ -334,12 +334,12 @@ func (c *Controller) runNQOSNodeWorker(wg *sync.WaitGroup) { // onNQOSAdd queues the NQOS for processing. 
func (c *Controller) onNQOSAdd(obj any) { - nqos, ok := obj.(*networkqosapi.NetworkQoS) - if !ok { - utilruntime.HandleError(fmt.Errorf("expecting NetworkQoS but received %T", obj)) + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) return } - c.nqosQueue.Add(nqos) + c.nqosQueue.Add(key) } // onNQOSUpdate updates the NQOS Selector in the cache and queues the NQOS for processing. @@ -359,30 +359,23 @@ func (c *Controller) onNQOSUpdate(oldObj, newObj any) { !newNQOS.GetDeletionTimestamp().IsZero() { return } - if reflect.DeepEqual(oldNQOS.Spec, newNQOS.Spec) { - return + key, err := cache.MetaNamespaceKeyFunc(newObj) + if err == nil { + // updates to NQOS object should be very rare, once put in place they usually stay the same + klog.V(4).Infof("Updating Network QoS %s: nqosSpec %v", + key, newNQOS.Spec) + c.nqosQueue.Add(key) } - c.nqosQueue.Add(newNQOS) } // onNQOSDelete queues the NQOS for processing. func (c *Controller) onNQOSDelete(obj interface{}) { - nqos, ok := obj.(*networkqosapi.NetworkQoS) - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) - return - } - nqos, ok = tombstone.Obj.(*networkqosapi.NetworkQoS) - if !ok { - utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a NetworkQoS: %#v", tombstone.Obj)) - return - } - } - if nqos != nil { - c.nqosQueue.Add(nqos) + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + return } + c.nqosQueue.Add(key) } // onNQOSNamespaceAdd queues the namespace for processing. diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go index d4363b8acb..a1c99f0d4f 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_namespace.go @@ -56,8 +56,8 @@ func (c *Controller) syncNetworkQoSNamespace(eventData *eventData[*corev1.Namesp } // getNetworkQosForNamespaceChange returns the set of NetworkQoS names that are affected by the namespace change -func (c *Controller) getNetworkQosForNamespaceChange(eventData *eventData[*corev1.Namespace]) (sets.Set[*nqosv1alpha1.NetworkQoS], error) { - networkQoSes := sets.Set[*nqosv1alpha1.NetworkQoS]{} +func (c *Controller) getNetworkQosForNamespaceChange(eventData *eventData[*corev1.Namespace]) (sets.Set[string], error) { + networkQoSes := sets.Set[string]{} nqoses, err := c.getAllNetworkQoSes() if err != nil { return nil, err @@ -69,12 +69,12 @@ func (c *Controller) getNetworkQosForNamespaceChange(eventData *eventData[*corev } // check if any network selector matches the namespace, or ns label change affects the network selection if namespaceMatchesNetworkSelector(ns, nqos) || networkSelectionChanged(nqos, eventData.new, eventData.old) { - networkQoSes.Insert(nqos) + networkQoSes.Insert(joinMetaNamespaceAndName(nqos.Namespace, nqos.Name)) continue } // check if any egress rule matches the namespace, or ns label change affects the egress selection if namespaceMatchesEgressRule(ns, nqos) || egressSelectionChanged(nqos, eventData.new, eventData.old) { - networkQoSes.Insert(nqos) + networkQoSes.Insert(joinMetaNamespaceAndName(nqos.Namespace, nqos.Name)) } } return networkQoSes, nil diff --git 
a/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go index d8ac3d0925..8a78883044 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_node.go @@ -56,7 +56,7 @@ func (c *Controller) syncNetworkQoSNode(key string) error { if nqos, err := c.nqosLister.NetworkQoSes(ns).Get(name); err != nil { klog.Errorf("Failed to get NetworkQoS %s: %v", nqosName, err) } else if nqos != nil { - c.nqosQueue.Add(nqos) + c.nqosQueue.Add(joinMetaNamespaceAndName(nqos.Namespace, nqos.Name)) } } return nil diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go index 44ac375b6c..625a8549ee 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_pod.go @@ -41,12 +41,12 @@ func (c *Controller) processNextNQOSPodWorkItem(wg *sync.WaitGroup) bool { // we dequeue a key from the nqosPodQueue cache func (c *Controller) syncNetworkQoSPod(eventData *eventData[*corev1.Pod]) error { startTime := time.Now() - networkQoSes, err := c.getNetworkQosForPodChange(eventData) + nqosNames, err := c.getNetworkQosForPodChange(eventData) if err != nil { return err } - for nqos := range networkQoSes { - c.nqosQueue.Add(nqos) + for nqosName := range nqosNames { + c.nqosQueue.Add(nqosName) } recordPodReconcileDuration(c.controllerName, time.Since(startTime).Milliseconds()) return nil @@ -113,7 +113,7 @@ func reconcilePodForDestinations(nqosState *networkQoSState, podNs *corev1.Names return nil } -func (c *Controller) getNetworkQosForPodChange(eventData *eventData[*corev1.Pod]) (sets.Set[*nqosv1alpha1.NetworkQoS], error) { +func (c *Controller) getNetworkQosForPodChange(eventData *eventData[*corev1.Pod]) (sets.Set[string], error) { var pod *corev1.Pod if eventData.new != nil { pod = eventData.new @@ -128,21 +128,21 @@ func (c *Controller) getNetworkQosForPodChange(eventData *eventData[*corev1.Pod] if err != nil { return nil, err } - affectedNetworkQoSes := sets.Set[*nqosv1alpha1.NetworkQoS]{} + affectedNetworkQoSes := sets.Set[string]{} for _, nqos := range nqoses { if podMatchesSourceSelector(pod, nqos) { - affectedNetworkQoSes.Insert(nqos) + affectedNetworkQoSes.Insert(joinMetaNamespaceAndName(nqos.Namespace, nqos.Name)) continue } // check if pod matches any egress for _, egress := range nqos.Spec.Egress { if podMatchesEgressSelector(podNs, pod, nqos, &egress) { - affectedNetworkQoSes.Insert(nqos) + affectedNetworkQoSes.Insert(joinMetaNamespaceAndName(nqos.Namespace, nqos.Name)) continue } } if podSelectionChanged(nqos, eventData.new, eventData.old) { - affectedNetworkQoSes.Insert(nqos) + affectedNetworkQoSes.Insert(joinMetaNamespaceAndName(nqos.Namespace, nqos.Name)) } } return affectedNetworkQoSes, nil From b21a43360ef877d4b23d852d2a029467772f497d Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Tue, 6 May 2025 13:01:12 +0200 Subject: [PATCH 17/18] Revert "Add flow for host -> localnet on same node" This reverts commit 0ee80bf943f9b9489b353d2d702656818d3fff77. 
Conflict in gateway_shared_intf.go because of not having https://github.com/ovn-kubernetes/ovn-kubernetes/pull/5153/files#diff-d3aa58d9b58a0a09264f072df46ab01d0501eb508c4656411ae2dc1ac68fb3c4 Signed-off-by: Surya Seetharaman (cherry picked from commit ebb73398310c882902f0f8b297bb8386d039ecfc) (cherry picked from commit b3760a19578b62523462178ef7ec778f6fad3d53) --- go-controller/pkg/node/gateway_shared_intf.go | 107 +++++++----------- 1 file changed, 41 insertions(+), 66 deletions(-) diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index acdc814430..4f8d4bfea7 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -1937,10 +1937,9 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.masqCTMark, ofPortPhys)) - // Allow (a) OVN->host traffic on the same node - // (b) host->host traffic on the same node + // Allow OVN->Host traffic on the same node if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { - dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, false)...) + dftFlows = append(dftFlows, ovnToHostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, false)...) } } else { // for UDN we additionally SNAT the packet from masquerade IP -> node IP @@ -2034,10 +2033,9 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.masqCTMark, ofPortPhys)) - // Allow (a) OVN->host traffic on the same node - // (b) host->host traffic on the same node + // Allow OVN->Host traffic on the same node if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { - dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, true)...) + dftFlows = append(dftFlows, ovnToHostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, true)...) } } else { // for UDN we additionally SNAT the packet from masquerade IP -> node IP @@ -2217,15 +2215,23 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin return dftFlows, nil } -// hostNetworkNormalActionFlows returns the flows that allow IP{v4,v6} traffic: -// a. from pods in the OVN network to pods in a localnet network, on the same node -// b. from pods on the host to pods in a localnet network, on the same node -// when the localnet is mapped to breth0. -// The expected srcMAC is the MAC address of breth0 and the expected hostSubnets is the host subnets found on the node -// primary interface. -func hostNetworkNormalActionFlows(netConfig *bridgeUDNConfiguration, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string { +// ovnToHostNetworkNormalActionFlows returns the flows that allow IP{v4,v6} traffic from the OVN network to the host network +// when the destination is on the same node as the sender. This is necessary for pods in the default network to reach +// localnet pods on the same node, when the localnet is mapped to breth0. The expected srcMAC is the MAC address of breth0 +// and the expected hostSubnets is the host subnets found on the node primary interface. 
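Restating the reverted function in isolation may help review: shared gateway mode matches traffic arriving from the OVN patch port and commits it with the network's masquerade CT mark, local gateway mode matches traffic from the bridge's LOCAL port with the host CT mark, and any other mode emits no flows; in the subnet loop, To4() == nil classifies an address as IPv6. A sketch with placeholder values (the cookie, zone, and CT-mark constants below are mine, not the tree's):

    package demo

    import (
    	"fmt"
    	"net"
    )

    const (
    	cookie        = "0x1234" // placeholder; real code uses defaultOpenFlowCookie
    	conntrackZone = 64000    // placeholder for config.Default.ConntrackZone
    	ctMarkHost    = "0x2"    // placeholder for the host CT mark constant
    )

    // normalActionFlows sketches the reverted function: pick in_port/ct_mark by
    // gateway mode, then emit one priority-102 flow per same-family host subnet
    // that hands matching traffic to the bridge's NORMAL pipeline instead of
    // letting the priority-100 flow output it straight to the NIC.
    func normalActionFlows(mode, patchPort, masqCTMark, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string {
    	var inPort, ctMark string
    	switch mode {
    	case "shared":
    		inPort, ctMark = patchPort, masqCTMark // traffic from the OVN patch port
    	case "local":
    		inPort, ctMark = "LOCAL", ctMarkHost // traffic from the bridge LOCAL port
    	default:
    		return nil // no flows for other gateway modes
    	}
    	ipFamily, dstField := "ip", "nw_dst"
    	if isV6 {
    		ipFamily, dstField = "ipv6", "ipv6_dst"
    	}
    	var flows []string
    	for _, hostSubnet := range hostSubnets {
    		// To4() == nil marks the subnet as IPv6; for any non-nil IP this
    		// agrees with utilnet.IsIPv6, IPv4-mapped addresses included.
    		if (hostSubnet.IP.To4() == nil) != isV6 {
    			continue
    		}
    		flows = append(flows, fmt.Sprintf(
    			"cookie=%s, priority=102, in_port=%s, dl_src=%s, %s, %s=%s, "+
    				"actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL",
    			cookie, inPort, srcMAC, ipFamily, dstField, hostSubnet.String(),
    			conntrackZone, ctMark))
    	}
    	return flows
    }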
+func ovnToHostNetworkNormalActionFlows(netConfig *bridgeUDNConfiguration, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string { + var inPort, ctMark, ipFamily, ipFamilyDest string var flows []string - var ipFamily, ipFamilyDest string + + if config.Gateway.Mode == config.GatewayModeShared { + inPort = netConfig.ofPortPatch + ctMark = netConfig.masqCTMark + } else if config.Gateway.Mode == config.GatewayModeLocal { + inPort = "LOCAL" + ctMark = ctMarkHost + } else { + return nil + } if isV6 { ipFamily = "ipv6" @@ -2235,69 +2241,38 @@ func hostNetworkNormalActionFlows(netConfig *bridgeUDNConfiguration, srcMAC stri ipFamilyDest = "nw_dst" } - formatFlow := func(inPort, destIP, ctMark string) string { - // Matching IP traffic will be handled by the bridge instead of being output directly - // to the NIC by the existing flow at prio=100. - flowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, %s, %s=%s, " + - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" - return fmt.Sprintf(flowTemplate, - defaultOpenFlowCookie, - inPort, - srcMAC, - ipFamily, - ipFamilyDest, - destIP, - config.Default.ConntrackZone, - ctMark) - } - - // Traffic path (a): OVN->localnet for shared gw mode - if config.Gateway.Mode == config.GatewayModeShared { - for _, hostSubnet := range hostSubnets { - if utilnet.IsIPv6(hostSubnet.IP) != isV6 { - continue - } - flows = append(flows, formatFlow(netConfig.ofPortPatch, hostSubnet.String(), netConfig.masqCTMark)) - } - } - - // Traffic path (a): OVN->localnet for local gw mode - // Traffic path (b): host->localnet for both gw modes for _, hostSubnet := range hostSubnets { - if utilnet.IsIPv6(hostSubnet.IP) != isV6 { + if (hostSubnet.IP.To4() == nil) != isV6 { continue } - flows = append(flows, formatFlow("LOCAL", hostSubnet.String(), ctMarkHost)) - } - - if isV6 { - // IPv6 neighbor discovery uses ICMPv6 messages sent to a special destination (ff02::1:ff00:0/104) - // that is unrelated to the host subnets matched in the prio=102 flow above. - // Allow neighbor discovery by matching against ICMP type and ingress port. - formatICMPFlow := func(inPort, ctMark string, icmpType int) string { - icmpFlowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, icmp6, icmpv6_type=%d, " + - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" - return fmt.Sprintf(icmpFlowTemplate, + // IP traffic from the OVN network to the host network should be handled normally by the bridge instead of + // being output directly to the NIC by the existing flow at prio=100. + flows = append(flows, + fmt.Sprintf("cookie=%s, priority=102, in_port=%s, dl_src=%s, %s, %s=%s, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL", defaultOpenFlowCookie, inPort, srcMAC, - icmpType, + ipFamily, + ipFamilyDest, + hostSubnet.String(), config.Default.ConntrackZone, - ctMark) - } + ctMark)) + } + if isV6 { + // Neighbor discovery in IPv6 happens through ICMPv6 messages to a special destination (ff02::1:ff00:0/104), + // which has nothing to do with the host subnets we're matching against in the flow above at prio=102. + // Let's allow neighbor discovery by matching against icmp type and in_port. 
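For readers without the constants at hand, types.NeighborSolicitationICMPType and types.NeighborAdvertisementICMPType correspond to ICMPv6 types 135 and 136. Without these two extra flows the subnet matches above never see neighbor discovery, since ND is addressed to the solicited-node multicast range rather than a host subnet, and same-node IPv6 neighbors could not resolve. A sketch of the companion flows, reusing the same placeholder constants rather than the tree's:

    package demo

    import "fmt"

    const (
    	neighborSolicitationICMPType  = 135 // ICMPv6 Neighbor Solicitation
    	neighborAdvertisementICMPType = 136 // ICMPv6 Neighbor Advertisement
    )

    // ndFlows renders the ICMPv6 companions to the subnet flows: same cookie,
    // priority, in_port, and CT treatment, but matched on icmpv6_type instead
    // of a destination subnet.
    func ndFlows(cookie, inPort, srcMAC, ctMark string, zone int) []string {
    	var flows []string
    	for _, icmpType := range []int{neighborSolicitationICMPType, neighborAdvertisementICMPType} {
    		flows = append(flows, fmt.Sprintf(
    			"cookie=%s, priority=102, in_port=%s, dl_src=%s, icmp6, icmpv6_type=%d, "+
    				"actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL",
    			cookie, inPort, srcMAC, icmpType, zone, ctMark))
    	}
    	return flows
    }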
for _, icmpType := range []int{types.NeighborSolicitationICMPType, types.NeighborAdvertisementICMPType} { - // Traffic path (a) for ICMP: OVN-> localnet for shared gw mode - if config.Gateway.Mode == config.GatewayModeShared { - flows = append(flows, - formatICMPFlow(netConfig.ofPortPatch, netConfig.masqCTMark, icmpType)) - } - - // Traffic path (a) for ICMP: OVN->localnet for local gw mode - // Traffic path (b) for ICMP: host->localnet for both gw modes - flows = append(flows, formatICMPFlow("LOCAL", ctMarkHost, icmpType)) + flows = append(flows, + fmt.Sprintf("cookie=%s, priority=102, in_port=%s, dl_src=%s, icmp6, icmpv6_type=%d, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL", + defaultOpenFlowCookie, inPort, srcMAC, icmpType, + config.Default.ConntrackZone, ctMark)) } } + return flows } From 755fdc4fe22703577d32f97199c85406ba793e0e Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Tue, 6 May 2025 13:02:30 +0200 Subject: [PATCH 18/18] Revert "e2e: connect to host-networked pod from localnet" This reverts commit b1525c32b2fdb7687eb6d3922636117b300f6a56. Signed-off-by: Surya Seetharaman (cherry picked from commit 936e6214a82062eb51f0649db2f74dcb9d205e12) (cherry picked from commit def29099b85ee99853a488f4b4810a510eeb018a) --- test/e2e/multihoming.go | 67 +++-------------------------------- test/e2e/multihoming_utils.go | 45 +++++++---------------- 2 files changed, 17 insertions(+), 95 deletions(-) diff --git a/test/e2e/multihoming.go b/test/e2e/multihoming.go index 7715469c3d..88b15568d8 100644 --- a/test/e2e/multihoming.go +++ b/test/e2e/multihoming.go @@ -331,31 +331,17 @@ var _ = Describe("Multi Homing", func() { kickstartPod(cs, clientPodConfig) // Check that the client pod can reach the server pod on the server localnet interface - var serverIPs []string - if serverPodConfig.hostNetwork { - serverIPs, err = podIPsFromStatus(cs, serverPodConfig.namespace, serverPodConfig.name) - } else { - serverIPs, err = podIPsForAttachment(cs, serverPod.Namespace, serverPod.Name, netConfig.name) - - } + serverIPs, err := podIPsForAttachment(cs, f.Namespace.Name, serverPod.GetName(), netConfig.name) Expect(err).NotTo(HaveOccurred()) - for _, serverIP := range serverIPs { By(fmt.Sprintf("asserting the *client* can contact the server pod exposed endpoint: %q on port %q", serverIP, port)) - curlArgs := []string{} - pingArgs := []string{} - if clientPodConfig.attachments != nil { - // When the client is attached to a localnet, send probes from the localnet interface - curlArgs = []string{"--interface", "net1"} - pingArgs = []string{"-I", "net1"} - } Eventually(func() error { - return reachServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP, port, curlArgs...) + return reachServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP, port) }, 2*time.Minute, 6*time.Second).Should(Succeed()) By(fmt.Sprintf("asserting the *client* can ping the server pod exposed endpoint: %q", serverIP)) Eventually(func() error { - return pingServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP, pingArgs...) 
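The connectivity assertions in this test rely on Gomega's polling helper: retry a probe every 6 seconds for up to 2 minutes and succeed as soon as it stops returning an error. A standalone sketch of that cadence, using gomega directly rather than the suite's Ginkgo wrappers (an assumption for brevity; the deadline below only simulates a server that becomes ready):

    package demo

    import (
    	"errors"
    	"testing"
    	"time"

    	"github.com/onsi/gomega"
    )

    // TestEventually polls the probe until it returns nil, mirroring the
    // 2*time.Minute / 6*time.Second budget the e2e suite uses.
    func TestEventually(t *testing.T) {
    	g := gomega.NewWithT(t)
    	deadline := time.Now().Add(10 * time.Second) // stand-in for "server came up"
    	probe := func() error {
    		if time.Now().Before(deadline) {
    			return errors.New("not ready yet")
    		}
    		return nil
    	}
    	g.Eventually(probe, 2*time.Minute, 6*time.Second).Should(gomega.Succeed())
    }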
+ return pingServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP) }, 2*time.Minute, 6*time.Second).Should(Succeed()) } }, @@ -403,52 +389,6 @@ var _ = Describe("Multi Homing", func() { }, Label("BUG", "OCPBUGS-43004"), ), - ginkgo.Entry( - "can reach a host-networked pod on a different node", - networkAttachmentConfigParams{ - name: secondaryNetworkName, - topology: "localnet", - }, - podConfiguration{ // client on localnet - attachments: []nadapi.NetworkSelectionElement{{ - Name: secondaryNetworkName, - }}, - name: clientPodName, - nodeSelector: map[string]string{nodeHostnameKey: workerOneNodeName}, - isPrivileged: true, - needsIPRequestFromHostSubnet: true, - }, - podConfiguration{ // server on default network, pod is host-networked - name: podName, - containerCmd: httpServerContainerCmd(port), - nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, - hostNetwork: true, - }, - Label("STORY", "SDN-5345"), - ), - ginkgo.Entry( - "can reach a host-networked pod on the same node", - networkAttachmentConfigParams{ - name: secondaryNetworkName, - topology: "localnet", - }, - podConfiguration{ // client on localnet - attachments: []nadapi.NetworkSelectionElement{{ - Name: secondaryNetworkName, - }}, - name: clientPodName, - nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, - isPrivileged: true, - needsIPRequestFromHostSubnet: true, - }, - podConfiguration{ // server on default network, pod is host-networked - name: podName, - containerCmd: httpServerContainerCmd(port), - nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, - hostNetwork: true, - }, - Label("STORY", "SDN-5345"), - ), ) }) @@ -908,6 +848,7 @@ var _ = Describe("Multi Homing", func() { Context("localnet OVN-K secondary network", func() { const ( clientPodName = "client-pod" + nodeHostnameKey = "kubernetes.io/hostname" servicePort = 9000 dockerNetworkName = "underlay" underlayServiceIP = "60.128.0.1" diff --git a/test/e2e/multihoming_utils.go b/test/e2e/multihoming_utils.go index 7ddd109eae..1c0d1a7435 100644 --- a/test/e2e/multihoming_utils.go +++ b/test/e2e/multihoming_utils.go @@ -161,7 +161,6 @@ type podConfiguration struct { isPrivileged bool labels map[string]string requiresExtraNamespace bool - hostNetwork bool needsIPRequestFromHostSubnet bool } @@ -172,7 +171,6 @@ func generatePodSpec(config podConfiguration) *v1.Pod { } podSpec.Spec.NodeSelector = config.nodeSelector podSpec.Labels = config.labels - podSpec.Spec.HostNetwork = config.hostNetwork if config.isPrivileged { podSpec.Spec.Containers[0].SecurityContext.Privileged = ptr.To(true) } else { @@ -255,19 +253,17 @@ func inRange(cidr string, ip string) error { return fmt.Errorf("ip [%s] is NOT in range %s", ip, cidr) } -func connectToServer(clientPodConfig podConfiguration, serverIP string, port int, args ...string) error { - target := net.JoinHostPort(serverIP, fmt.Sprintf("%d", port)) - baseArgs := []string{ +func connectToServer(clientPodConfig podConfiguration, serverIP string, port int) error { + _, err := e2ekubectl.RunKubectl( + clientPodConfig.namespace, "exec", clientPodConfig.name, "--", "curl", "--connect-timeout", "2", - } - baseArgs = append(baseArgs, args...) - - _, err := e2ekubectl.RunKubectl(clientPodConfig.namespace, append(baseArgs, target)...) 
+ net.JoinHostPort(serverIP, fmt.Sprintf("%d", port)), + ) return err } @@ -312,19 +308,16 @@ func getSecondaryInterfaceMTU(clientPodConfig podConfiguration) (int, error) { return mtu, nil } -func pingServer(clientPodConfig podConfiguration, serverIP string, args ...string) error { - baseArgs := []string{ +func pingServer(clientPodConfig podConfiguration, serverIP string) error { + _, err := e2ekubectl.RunKubectl( + clientPodConfig.namespace, "exec", clientPodConfig.name, "--", "ping", "-c", "1", // send one ICMP echo request "-W", "2", // timeout after 2 seconds if no response - } - baseArgs = append(baseArgs, args...) - - _, err := e2ekubectl.RunKubectl(clientPodConfig.namespace, append(baseArgs, serverIP)...) - + serverIP) return err } @@ -388,18 +381,6 @@ func podIPForAttachment(k8sClient clientset.Interface, podNamespace string, podN return ips[ipIndex], nil } -func podIPsFromStatus(k8sClient clientset.Interface, podNamespace string, podName string) ([]string, error) { - pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - podIPs := make([]string, 0, len(pod.Status.PodIPs)) - for _, podIP := range pod.Status.PodIPs { - podIPs = append(podIPs, podIP.IP) - } - return podIPs, nil -} - func allowedClient(podName string) string { return "allowed-" + podName } @@ -629,27 +610,27 @@ func allowedTCPPortsForPolicy(allowPorts ...int) []mnpapi.MultiNetworkPolicyPort return portAllowlist } -func reachServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string, serverPort int, args ...string) error { +func reachServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string, serverPort int) error { updatedPod, err := cs.CoreV1().Pods(serverConfig.namespace).Get(context.Background(), serverConfig.name, metav1.GetOptions{}) if err != nil { return err } if updatedPod.Status.Phase == v1.PodRunning { - return connectToServer(clientConfig, serverIP, serverPort, args...) + return connectToServer(clientConfig, serverIP, serverPort) } return fmt.Errorf("pod not running. /me is sad") } -func pingServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string, args ...string) error { +func pingServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string) error { updatedPod, err := cs.CoreV1().Pods(serverConfig.namespace).Get(context.Background(), serverConfig.name, metav1.GetOptions{}) if err != nil { return err } if updatedPod.Status.Phase == v1.PodRunning { - return pingServer(clientConfig, serverIP, args...) + return pingServer(clientConfig, serverIP) } return fmt.Errorf("pod not running. /me is sad")
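The restored helpers boil down to one pattern: exec a short-lived probe inside the client pod and let the Eventually wrapper retry it. net.JoinHostPort matters here because it brackets IPv6 literals ([2001:db8::1]:9000), which a naive fmt.Sprintf("%s:%d", ...) would get wrong. A self-contained sketch, substituting plain kubectl for the suite's e2ekubectl wrapper (an assumption for brevity):

    package demo

    import (
    	"fmt"
    	"net"
    	"os/exec"
    )

    // curlFromPod mirrors connectToServer: run curl inside the client pod
    // against host:port, with a short connect timeout so retries stay cheap.
    func curlFromPod(namespace, pod, serverIP string, port int) error {
    	target := net.JoinHostPort(serverIP, fmt.Sprintf("%d", port))
    	return exec.Command("kubectl", "-n", namespace, "exec", pod, "--",
    		"curl", "--connect-timeout", "2", target).Run()
    }

    // pingFromPod mirrors pingServer: one echo request, two-second deadline.
    func pingFromPod(namespace, pod, serverIP string) error {
    	return exec.Command("kubectl", "-n", namespace, "exec", pod, "--",
    		"ping", "-c", "1", "-W", "2", serverIP).Run()
    }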