
prove the clientbuilder works with RC controller #14033

Merged: 3 commits, May 10, 2017
37 changes: 37 additions & 0 deletions pkg/cmd/server/bootstrappolicy/dead.go
@@ -0,0 +1,37 @@
package bootstrappolicy

import (
"github.com/golang/glog"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

authorizationapi "github.com/openshift/origin/pkg/authorization/api"
)

var (
deadClusterRoles = []authorizationapi.ClusterRole{}
)

func addDeadClusterRole(name string) {
for _, existingRole := range deadClusterRoles {
if name == existingRole.Name {
glog.Fatalf("role %q was already registered", name)
}
}

deadClusterRoles = append(deadClusterRoles,
authorizationapi.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: name},
},
)
}

// GetDeadClusterRoles returns cluster roles which should no longer have any permissions.
// These are enumerated so that a reconcile that tightens permissions will properly remove them.
func GetDeadClusterRoles() []authorizationapi.ClusterRole {
return deadClusterRoles
}

func init() {
addDeadClusterRole("system:replication-controller")
}
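
The dead-role mechanism above works by registration: each dead role is stored with an empty rule set, and GetDeadClusterRoles feeds that list into the bootstrap roles, so a reconcile that tightens permissions overwrites whatever rules the stored role still carries. A minimal, self-contained sketch of that idea (simplified stand-in types, not the origin authorization API):

package main

import "fmt"

// ClusterRole is a simplified stand-in for authorizationapi.ClusterRole.
type ClusterRole struct {
	Name  string
	Rules []string // simplified; real rules are PolicyRule structs
}

// reconcileRoles overwrites stored roles with their bootstrap definitions.
// Because dead roles appear in the bootstrap list with zero rules, this
// strips permissions from roles such as system:replication-controller.
func reconcileRoles(existing map[string]ClusterRole, bootstrap []ClusterRole) {
	for _, desired := range bootstrap {
		if current, ok := existing[desired.Name]; ok && len(current.Rules) != len(desired.Rules) {
			existing[desired.Name] = desired // tighten to the (possibly empty) desired rules
		}
	}
}

func main() {
	existing := map[string]ClusterRole{
		"system:replication-controller": {Name: "system:replication-controller", Rules: []string{"get", "list", "watch"}},
	}
	bootstrap := []ClusterRole{{Name: "system:replication-controller"}} // dead role: no rules
	reconcileRoles(existing, bootstrap)
	fmt.Println(existing["system:replication-controller"].Rules) // prints []
}
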
48 changes: 0 additions & 48 deletions pkg/cmd/server/bootstrappolicy/infra_sa_policy.go
@@ -26,9 +26,6 @@ const (
InfraBuildControllerServiceAccountName = "build-controller"
BuildControllerRoleName = "system:build-controller"

InfraReplicationControllerServiceAccountName = "replication-controller"
ReplicationControllerRoleName = "system:replication-controller"

InfraReplicaSetControllerServiceAccountName = "replicaset-controller"
ReplicaSetControllerRoleName = "system:replicaset-controller"

@@ -290,51 +287,6 @@ func init() {
panic(err)
}

err = InfraSAs.addServiceAccount(
InfraReplicationControllerServiceAccountName,
authorizationapi.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: ReplicationControllerRoleName,
},
Rules: []authorizationapi.PolicyRule{
// ReplicationManager.rcController.ListWatch
{
Verbs: sets.NewString("list", "watch"),
Resources: sets.NewString("replicationcontrollers"),
},
// ReplicationManager.syncReplicationController() -> updateReplicaCount()
{
// TODO: audit/remove those, 1.0 controllers needed get, update
Verbs: sets.NewString("get", "update"),
Resources: sets.NewString("replicationcontrollers"),
},
// ReplicationManager.syncReplicationController() -> updateReplicaCount()
{
Verbs: sets.NewString("update"),
Resources: sets.NewString("replicationcontrollers/status"),
},
// ReplicationManager.podController.ListWatch
{
Verbs: sets.NewString("list", "watch"),
Resources: sets.NewString("pods"),
},
// ReplicationManager.podControl (RealPodControl)
{
Verbs: sets.NewString("create", "delete", "patch"),
Resources: sets.NewString("pods"),
},
// ReplicationManager.podControl.recorder
{
Verbs: sets.NewString("create", "update", "patch"),
Resources: sets.NewString("events"),
},
},
},
)
if err != nil {
panic(err)
}

err = InfraSAs.addServiceAccount(
InfraReplicaSetControllerServiceAccountName,
authorizationapi.ClusterRole{
3 changes: 3 additions & 0 deletions pkg/cmd/server/bootstrappolicy/policy.go
@@ -941,6 +941,9 @@ func GetOpenshiftBootstrapClusterRoles() []authorizationapi.ClusterRole {

func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
openshiftClusterRoles := GetOpenshiftBootstrapClusterRoles()
// dead cluster roles need to be checked for conflicts (in case something new comes up)
// so add them to this list.
openshiftClusterRoles = append(openshiftClusterRoles, GetDeadClusterRoles()...)
openshiftSAClusterRoles := InfraSAs.AllRoles()
kubeClusterRoles, err := GetKubeBootstrapClusterRoles()
// coder error
12 changes: 0 additions & 12 deletions pkg/cmd/server/kubernetes/master/master.go
@@ -43,7 +43,6 @@ import (
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
gccontroller "k8s.io/kubernetes/pkg/controller/podgc"
replicasetcontroller "k8s.io/kubernetes/pkg/controller/replicaset"
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
servicecontroller "k8s.io/kubernetes/pkg/controller/service"
statefulsetcontroller "k8s.io/kubernetes/pkg/controller/statefulset"
attachdetachcontroller "k8s.io/kubernetes/pkg/controller/volume/attachdetach"
@@ -207,17 +206,6 @@ func (c *MasterConfig) RunReplicaSetController(client kclientset.Interface) {
go controller.Run(int(c.ControllerManager.ConcurrentRSSyncs), utilwait.NeverStop)
}

// RunReplicationController starts the Kubernetes replication controller sync loop
func (c *MasterConfig) RunReplicationController(client kclientset.Interface) {
controllerManager := replicationcontroller.NewReplicationManager(
c.Informers.KubernetesInformers().Core().V1().Pods(),
c.Informers.KubernetesInformers().Core().V1().ReplicationControllers(),
client,
replicationcontroller.BurstReplicas,
)
go controllerManager.Run(int(c.ControllerManager.ConcurrentRCSyncs), utilwait.NeverStop)
}

func (c *MasterConfig) RunDeploymentController(client kclientset.Interface) {
controller := deployment.NewDeploymentController(
c.Informers.KubernetesInformers().Extensions().V1beta1().Deployments(),
79 changes: 74 additions & 5 deletions pkg/cmd/server/start/start_master.go
@@ -17,8 +17,11 @@ import (

kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
utilwait "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
restclient "k8s.io/client-go/rest"
kctrlmgr "k8s.io/kubernetes/cmd/kube-controller-manager/app"
cmapp "k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/apps"
@@ -27,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/controller"
kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"

@@ -602,10 +606,6 @@ func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) erro
oc.RunSecurityAllocationController()

if kc != nil {
_, _, _, rcClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraReplicationControllerServiceAccountName)
if err != nil {
glog.Fatalf("Could not get client for replication controller: %v", err)
}
_, _, _, rsClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraReplicaSetControllerServiceAccountName)
if err != nil {
glog.Fatalf("Could not get client for replication controller: %v", err)
@@ -687,10 +687,79 @@ func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) erro
glog.Fatalf("Could not get client for garbage collector controller: %v", err)
}

rootClientBuilder := controller.SimpleControllerClientBuilder{
ClientConfig: &oc.PrivilegedLoopbackClientConfig,
}
saClientBuilder := controller.SAControllerClientBuilder{
ClientConfig: restclient.AnonymousClientConfig(&oc.PrivilegedLoopbackClientConfig),
CoreClient: oc.PrivilegedLoopbackKubernetesClientsetExternal.Core(),
AuthenticationClient: oc.PrivilegedLoopbackKubernetesClientsetExternal.Authentication(),
Namespace: "kube-system",
}
availableResources, err := kctrlmgr.GetAvailableResources(rootClientBuilder)
if err != nil {
return err
}

controllerContext := kctrlmgr.ControllerContext{
ClientBuilder: saClientBuilder,
InformerFactory: oc.Informers.KubernetesInformers(),
Options: *controllerManagerOptions,
AvailableResources: availableResources,
Stop: utilwait.NeverStop,
}
controllerInitializers := kctrlmgr.NewControllerInitializers()

// TODO remove this. Using it now to control the migration
allowedControllers := sets.NewString(
// "endpoint",
"replicationcontroller",
// "podgc",
// "resourcequota",
// "namespace",
// "serviceaccount",
// "garbagecollector",
// "daemonset",
// "job",
// "deployment",
// "replicaset",
// "horizontalpodautoscaling",
// "disruption",
// "statefuleset",
// "cronjob",
// "certificatesigningrequests",
// "ttl",
// "bootstrapsigner",
// "tokencleaner",
)

for controllerName, initFn := range controllerInitializers {
// TODO remove this. Only call one to start to prove the principle
if !allowedControllers.Has(controllerName) {
glog.Warningf("%q is skipped", controllerName)
continue
}
if !controllerContext.IsControllerEnabled(controllerName) {
glog.Warningf("%q is disabled", controllerName)
continue
}

glog.V(1).Infof("Starting %q", controllerName)
started, err := initFn(controllerContext)
if err != nil {
glog.Errorf("Error starting %q", controllerName)
return err
}
if !started {
glog.Warningf("Skipping %q", controllerName)
continue
}
glog.Infof("Started %q", controllerName)
}

// no special order
kc.RunNodeController()
kc.RunScheduler()
kc.RunReplicationController(rcClient)
kc.RunReplicaSetController(rsClient)
kc.RunDeploymentController(deploymentClient)
kc.RunGarbageCollectorController(garbageCollectorControllerClient, garbageCollectorControllerConfig)
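
The controller loop added above follows the upstream kube-controller-manager pattern: controllers are registered as named init functions against a shared context built from the client builders, and a temporary whitelist gates which ones run during the migration. A minimal sketch of the same loop shape, using simplified stand-in types rather than the kctrlmgr API:

package main

import "fmt"

// controllerContext is a simplified stand-in for kctrlmgr.ControllerContext.
type controllerContext struct {
	stop <-chan struct{}
}

// initFunc mirrors the shape of an upstream controller initializer:
// it reports whether the controller started and any error.
type initFunc func(ctx controllerContext) (bool, error)

func main() {
	initializers := map[string]initFunc{
		"replicationcontroller": func(ctx controllerContext) (bool, error) {
			// a real init function would build clients from the ClientBuilder
			// and launch the controller's sync loop in a goroutine
			return true, nil
		},
		"daemonset": func(ctx controllerContext) (bool, error) { return true, nil },
	}
	allowed := map[string]bool{"replicationcontroller": true} // migration gate, mirrors allowedControllers above

	ctx := controllerContext{stop: make(chan struct{})}
	for name, initFn := range initializers {
		if !allowed[name] {
			fmt.Printf("%q is skipped\n", name)
			continue
		}
		started, err := initFn(ctx)
		if err != nil {
			fmt.Printf("error starting %q: %v\n", name, err)
			return
		}
		if !started {
			fmt.Printf("skipping %q\n", name)
			continue
		}
		fmt.Printf("started %q\n", name)
	}
}
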
5 changes: 4 additions & 1 deletion test/cmd/quota.sh
@@ -29,7 +29,10 @@ os::cmd::expect_success 'oc new-project asmail [email protected]'
os::cmd::try_until_text 'oc get appliedclusterresourcequota -n bar --as deads -o name' "for-deads-by-annotation"
os::cmd::try_until_text 'oc get appliedclusterresourcequota -n foo --as deads -o name' "for-deads-by-annotation"
os::cmd::try_until_text 'oc get appliedclusterresourcequota -n asmail --as [email protected] -o name' "for-deads-email-by-annotation"
os::cmd::try_until_text 'oc describe appliedclusterresourcequota/for-deads-by-annotation -n bar --as deads' "secrets.*1[0-9]"
# the point of the test is to make sure that clusterquota counts correctly and that secrets are auto-created and countable
# the create_dockercfg controller can issue multiple creates if the token controller doesn't fill them in, but the creates are duplicates
# since an annotation tracks the intended secrets to be created. That results in multi-counting quota until reconciliation runs
os::cmd::try_until_text 'oc describe appliedclusterresourcequota/for-deads-by-annotation -n bar --as deads' "secrets.*(1[0-9]|20|21|22)"
os::cmd::expect_success 'oc delete project foo'
os::cmd::try_until_not_text 'oc get clusterresourcequota/for-deads-by-annotation -o jsonpath="{.status.namespaces[*].namespace}"' 'foo'
os::cmd::expect_success 'oc delete project bar'
63 changes: 6 additions & 57 deletions test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml
@@ -2781,6 +2781,12 @@ items:
- get
- put
- update
- apiVersion: v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: system:replication-controller
rules: []
- apiVersion: v1
kind: ClusterRole
metadata:
@@ -3725,63 +3731,6 @@ items:
- create
- patch
- update
- apiVersion: v1
kind: ClusterRole
metadata:
annotations:
authorization.openshift.io/system-only: "true"
creationTimestamp: null
name: system:replication-controller
rules:
- apiGroups:
- ""
attributeRestrictions: null
resources:
- replicationcontrollers
verbs:
- list
- watch
- apiGroups:
- ""
attributeRestrictions: null
resources:
- replicationcontrollers
verbs:
- get
- update
- apiGroups:
- ""
attributeRestrictions: null
resources:
- replicationcontrollers/status
verbs:
- update
- apiGroups:
- ""
attributeRestrictions: null
resources:
- pods
verbs:
- list
- watch
- apiGroups:
- ""
attributeRestrictions: null
resources:
- pods
verbs:
- create
- delete
- patch
- apiGroups:
- ""
attributeRestrictions: null
resources:
- events
verbs:
- create
- patch
- update
- apiVersion: v1
kind: ClusterRole
metadata:
