diff --git a/pkg/cmd/server/bootstrappolicy/dead.go b/pkg/cmd/server/bootstrappolicy/dead.go
index 8c70efbcd730..6618829385c7 100644
--- a/pkg/cmd/server/bootstrappolicy/dead.go
+++ b/pkg/cmd/server/bootstrappolicy/dead.go
@@ -33,5 +33,17 @@ func GetDeadClusterRoles() []authorizationapi.ClusterRole {
 }
 
 func init() {
+	// these were replaced by kube controller roles
 	addDeadClusterRole("system:replication-controller")
+	addDeadClusterRole("system:endpoint-controller")
+	addDeadClusterRole("system:replicaset-controller")
+	addDeadClusterRole("system:garbage-collector-controller")
+	addDeadClusterRole("system:job-controller")
+	addDeadClusterRole("system:hpa-controller")
+	addDeadClusterRole("system:daemonset-controller")
+	addDeadClusterRole("system:disruption-controller")
+	addDeadClusterRole("system:namespace-controller")
+	addDeadClusterRole("system:gc-controller")
+	addDeadClusterRole("system:certificate-signing-controller")
+	addDeadClusterRole("system:statefulset-controller")
 }
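Reviewer note: the roles registered above are "dead" roles. GetDeadClusterRoles() returns each of them with an empty rule list, so reconciling bootstrap policy strips whatever permissions the old openshift-infra role granted on upgraded clusters. A minimal sketch of the pattern, assuming a helper shaped like the one earlier in dead.go (paraphrased for illustration, not verbatim):

	// illustrative sketch of the dead-role pattern; only the names
	// addDeadClusterRole/GetDeadClusterRoles come from this repo
	var deadClusterRoles = []authorizationapi.ClusterRole{}

	func addDeadClusterRole(name string) {
		// a dead role keeps its name but carries zero rules, so policy
		// reconciliation removes anything the role used to grant
		deadClusterRoles = append(deadClusterRoles, authorizationapi.ClusterRole{
			ObjectMeta: metav1.ObjectMeta{Name: name},
		})
	}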
"system:service-load-balancer-controller" - InfraStatefulSetControllerServiceAccountName = "statefulset-controller" - StatefulSetControllerRoleName = "system:statefulset-controller" - - InfraCertificateSigningControllerServiceAccountName = "certificate-signing-controller" - CertificateSigningControllerRoleName = "system:certificate-signing-controller" - InfraUnidlingControllerServiceAccountName = "unidling-controller" UnidlingControllerRoleName = "system:unidling-controller" ServiceServingCertServiceAccountName = "service-serving-cert-controller" ServiceServingCertControllerRoleName = "system:service-serving-cert-controller" - InfraEndpointControllerServiceAccountName = "endpoint-controller" - EndpointControllerRoleName = "system:endpoint-controller" - InfraServiceIngressIPControllerServiceAccountName = "service-ingress-ip-controller" ServiceIngressIPControllerRoleName = "system:service-ingress-ip-controller" InfraNodeBootstrapServiceAccountName = "node-bootstrapper" NodeBootstrapRoleName = "system:node-bootstrapper" - - InfraGarbageCollectorControllerServiceAccountName = "garbage-collector-controller" - GarbageCollectorControllerRoleName = "system:garbage-collector-controller" ) type InfraServiceAccounts struct { @@ -287,139 +250,6 @@ func init() { panic(err) } - err = InfraSAs.addServiceAccount( - InfraReplicaSetControllerServiceAccountName, - authorizationapi.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: ReplicaSetControllerRoleName, - }, - Rules: []authorizationapi.PolicyRule{ - { - APIGroups: []string{extensions.GroupName}, - Verbs: sets.NewString("get", "list", "watch", "update"), - Resources: sets.NewString("replicasets"), - }, - { - APIGroups: []string{extensions.GroupName}, - Verbs: sets.NewString("update"), - Resources: sets.NewString("replicasets/status"), - }, - { - Verbs: sets.NewString("list", "watch", "create", "delete"), - Resources: sets.NewString("pods"), - }, - { - Verbs: sets.NewString("create", "update", "patch"), - Resources: sets.NewString("events"), - }, - }, - }, - ) - if err != nil { - panic(err) - } - - err = InfraSAs.addServiceAccount( - InfraJobControllerServiceAccountName, - authorizationapi.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: JobControllerRoleName, - }, - Rules: []authorizationapi.PolicyRule{ - // JobController.jobController.ListWatch - // CronJobController.SyncAll - // CronJobController.SyncOne - { - APIGroups: []string{extensions.GroupName, batch.GroupName}, - Verbs: sets.NewString("get", "list", "watch"), - // TODO do we need to keep scheduledjobs or is cronjobs sufficient? 
- Resources: sets.NewString("jobs", "scheduledjobs", "cronjobs"), - }, - // JobController.syncJob - // CronJobController.SyncOne - { - APIGroups: []string{extensions.GroupName, batch.GroupName}, - Verbs: sets.NewString("update"), - Resources: sets.NewString("jobs/status", "scheduledjobs/status", "cronjobs/status"), - }, - // CronJobController.SyncOne - { - APIGroups: []string{extensions.GroupName, batch.GroupName}, - Verbs: sets.NewString("create", "update", "delete"), - Resources: sets.NewString("jobs"), - }, - // JobController.podController.ListWatch - { - Verbs: sets.NewString("list", "watch"), - Resources: sets.NewString("pods"), - }, - // JobController.podControl (RealPodControl) - { - Verbs: sets.NewString("create", "delete"), - Resources: sets.NewString("pods"), - }, - // JobController.podControl.recorder - { - Verbs: sets.NewString("create", "update", "patch"), - Resources: sets.NewString("events"), - }, - }, - }, - ) - if err != nil { - panic(err) - } - - err = InfraSAs.addServiceAccount( - InfraHPAControllerServiceAccountName, - authorizationapi.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: HPAControllerRoleName, - }, - Rules: []authorizationapi.PolicyRule{ - // HPA Controller - { - APIGroups: []string{extensions.GroupName, autoscaling.GroupName}, - Verbs: sets.NewString("get", "list", "watch"), - Resources: sets.NewString("horizontalpodautoscalers"), - }, - { - APIGroups: []string{extensions.GroupName, autoscaling.GroupName}, - Verbs: sets.NewString("update"), - Resources: sets.NewString("horizontalpodautoscalers/status"), - }, - { - APIGroups: []string{extensions.GroupName, kapi.GroupName}, - Verbs: sets.NewString("get", "update"), - Resources: sets.NewString("replicationcontrollers/scale"), - }, - { - Verbs: sets.NewString("get", "update"), - Resources: sets.NewString("deploymentconfigs/scale"), - }, - { - Verbs: sets.NewString("create", "update", "patch"), - Resources: sets.NewString("events"), - }, - // Heapster MetricsClient - { - Verbs: sets.NewString("list"), - Resources: sets.NewString("pods"), - }, - { - // TODO: fix MetricsClient to no longer require root proxy access - // TODO: restrict this to the appropriate namespace - Verbs: sets.NewString("proxy"), - Resources: sets.NewString("services"), - ResourceNames: sets.NewString("https:heapster:"), - }, - }, - }, - ) - if err != nil { - panic(err) - } - err = InfraSAs.addServiceAccount( InfraPersistentVolumeRecyclerControllerServiceAccountName, authorizationapi.ClusterRole{ @@ -653,170 +483,6 @@ func init() { panic(err) } - err = InfraSAs.addServiceAccount( - InfraDaemonSetControllerServiceAccountName, - authorizationapi.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: DaemonSetControllerRoleName, - }, - Rules: []authorizationapi.PolicyRule{ - // DaemonSetsController.dsStore.ListWatch - { - APIGroups: []string{extensions.GroupName}, - Verbs: sets.NewString("get", "list", "watch"), - Resources: sets.NewString("daemonsets"), - }, - // DaemonSetsController.podStore.ListWatch - { - Verbs: sets.NewString("list", "watch"), - Resources: sets.NewString("pods"), - }, - // DaemonSetsController.nodeStore.ListWatch - { - Verbs: sets.NewString("list", "watch"), - Resources: sets.NewString("nodes"), - }, - // DaemonSetsController.storeDaemonSetStatus - { - APIGroups: []string{extensions.GroupName}, - Verbs: sets.NewString("update"), - Resources: sets.NewString("daemonsets/status"), - }, - // DaemonSetsController.podControl (RealPodControl) - { - Verbs: sets.NewString("create", "delete", "patch"), - Resources: 
sets.NewString("pods"), - }, - { - APIGroups: []string{kapi.GroupName}, - Verbs: sets.NewString("create"), - Resources: sets.NewString("pods/binding"), - }, - // DaemonSetsController.podControl.recorder - { - Verbs: sets.NewString("create", "update", "patch"), - Resources: sets.NewString("events"), - }, - }, - }, - ) - if err != nil { - panic(err) - } - - err = InfraSAs.addServiceAccount( - InfraDisruptionControllerServiceAccountName, - authorizationapi.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: DisruptionControllerRoleName, - }, - Rules: []authorizationapi.PolicyRule{ - // DisruptionBudgetController.dStore.ListWatch - { - APIGroups: []string{extensions.GroupName}, - Verbs: sets.NewString("list", "watch"), - Resources: sets.NewString("deployments"), - }, - // DisruptionBudgetController.rsStore.ListWatch - { - APIGroups: []string{extensions.GroupName}, - Verbs: sets.NewString("list", "watch"), - Resources: sets.NewString("replicasets"), - }, - // DisruptionBudgetController.rcStore.ListWatch - { - APIGroups: []string{kapi.GroupName}, - Verbs: sets.NewString("list", "watch"), - Resources: sets.NewString("replicationcontrollers"), - }, - { - APIGroups: []string{apps.GroupName}, - Verbs: sets.NewString("list", "watch"), - Resources: sets.NewString("statefulsets"), - }, - { - APIGroups: []string{policy.GroupName}, - Verbs: sets.NewString("get", "list", "watch"), - Resources: sets.NewString("poddisruptionbudgets"), - }, - // DisruptionBudgetController.dbControl - { - APIGroups: []string{policy.GroupName}, - Verbs: sets.NewString("update"), - Resources: sets.NewString("poddisruptionbudgets/status"), - }, - }, - }, - ) - if err != nil { - panic(err) - } - - err = InfraSAs.addServiceAccount( - InfraNamespaceControllerServiceAccountName, - authorizationapi.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: NamespaceControllerRoleName, - }, - Rules: []authorizationapi.PolicyRule{ - // Watching/deleting namespaces - { - APIGroups: []string{kapi.GroupName}, - Verbs: sets.NewString("get", "list", "watch", "delete"), - Resources: sets.NewString("namespaces"), - }, - // Updating status to terminating, updating finalizer list - { - APIGroups: []string{kapi.GroupName}, - Verbs: sets.NewString("update"), - Resources: sets.NewString("namespaces/finalize", "namespaces/status"), - }, - - // Ability to delete resources - { - APIGroups: []string{"*"}, - Verbs: sets.NewString("get", "list", "delete", "deletecollection"), - Resources: sets.NewString("*"), - }, - }, - }, - ) - if err != nil { - panic(err) - } - - err = InfraSAs.addServiceAccount( - InfraGCControllerServiceAccountName, - authorizationapi.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: GCControllerRoleName, - }, - Rules: []authorizationapi.PolicyRule{ - // GCController.podStore.ListWatch - { - APIGroups: []string{kapi.GroupName}, - Verbs: sets.NewString("list", "watch"), - Resources: sets.NewString("pods"), - }, - // GCController.nodeStore.ListWatch - { - APIGroups: []string{kapi.GroupName}, - Verbs: sets.NewString("list", "watch"), - Resources: sets.NewString("nodes"), - }, - // GCController.deletePod - { - APIGroups: []string{kapi.GroupName}, - Verbs: sets.NewString("delete"), - Resources: sets.NewString("pods"), - }, - }, - }, - ) - if err != nil { - panic(err) - } - err = InfraSAs.addServiceAccount( InfraServiceLoadBalancerControllerServiceAccountName, authorizationapi.ClusterRole{ @@ -860,62 +526,6 @@ func init() { panic(err) } - err = InfraSAs.addServiceAccount( - InfraStatefulSetControllerServiceAccountName, - 
-		authorizationapi.ClusterRole{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: StatefulSetControllerRoleName,
-			},
-			Rules: []authorizationapi.PolicyRule{
-				// StatefulSetController.podCache.ListWatch
-				{
-					APIGroups: []string{kapi.GroupName},
-					Verbs:     sets.NewString("list", "watch"),
-					Resources: sets.NewString("pods"),
-				},
-				// StatefulSetController.cache.ListWatch
-				{
-					APIGroups: []string{apps.GroupName},
-					Verbs:     sets.NewString("list", "watch"),
-					Resources: sets.NewString("statefulsets"),
-				},
-				// StatefulSetController.petClient
-				{
-					APIGroups: []string{apps.GroupName},
-					Verbs:     sets.NewString("get"),
-					Resources: sets.NewString("statefulsets"),
-				},
-				{
-					APIGroups: []string{apps.GroupName},
-					Verbs:     sets.NewString("update"),
-					Resources: sets.NewString("statefulsets/status"),
-				},
-				// StatefulSetController.podControl
-				{
-					APIGroups: []string{kapi.GroupName},
-					Verbs:     sets.NewString("get", "create", "delete", "update", "patch"),
-					Resources: sets.NewString("pods"),
-				},
-				// StatefulSetController.petClient (PVC)
-				// This is an escalating client and we must admission check the statefulset
-				{
-					APIGroups: []string{kapi.GroupName},
-					Verbs:     sets.NewString("get", "create"), // future "delete"
-					Resources: sets.NewString("persistentvolumeclaims"),
-				},
-				// StatefulSetController.eventRecorder
-				{
-					APIGroups: []string{kapi.GroupName},
-					Verbs:     sets.NewString("create", "update", "patch"),
-					Resources: sets.NewString("events"),
-				},
-			},
-		},
-	)
-	if err != nil {
-		panic(err)
-	}
-
 	err = InfraSAs.addServiceAccount(
 		InfraUnidlingControllerServiceAccountName,
 		authorizationapi.ClusterRole{
@@ -989,62 +599,6 @@ func init() {
 		panic(err)
 	}
 
-	err = InfraSAs.addServiceAccount(
-		InfraCertificateSigningControllerServiceAccountName,
-		authorizationapi.ClusterRole{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: CertificateSigningControllerRoleName,
-			},
-			Rules: []authorizationapi.PolicyRule{
-				{
-					APIGroups: []string{certificates.GroupName},
-					Verbs:     sets.NewString("list", "watch"),
-					Resources: sets.NewString("certificatesigningrequests"),
-				},
-				{
-					APIGroups: []string{certificates.GroupName},
-					Verbs:     sets.NewString("update"),
-					Resources: sets.NewString("certificatesigningrequests/status", "certificatesigningrequests/approval"),
-				},
-			},
-		},
-	)
-	if err != nil {
-		panic(err)
-	}
-
-	err = InfraSAs.addServiceAccount(
-		InfraEndpointControllerServiceAccountName,
-		authorizationapi.ClusterRole{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: EndpointControllerRoleName,
-			},
-			Rules: []authorizationapi.PolicyRule{
-				// Watching services and pods
-				{
-					APIGroups: []string{kapi.GroupName},
-					Verbs:     sets.NewString("get", "list", "watch"),
-					Resources: sets.NewString("services", "pods"),
-				},
-				// Managing endpoints
-				{
-					APIGroups: []string{kapi.GroupName},
-					Verbs:     sets.NewString("get", "list", "create", "update", "delete"),
-					Resources: sets.NewString("endpoints"),
-				},
-				// Permission for RestrictedEndpointsAdmission
-				{
-					APIGroups: []string{kapi.GroupName},
-					Verbs:     sets.NewString("create"),
-					Resources: sets.NewString("endpoints/restricted"),
-				},
-			},
-		},
-	)
-	if err != nil {
-		panic(err)
-	}
-
 	err = InfraSAs.addServiceAccount(
 		InfraServiceIngressIPControllerServiceAccountName,
 		authorizationapi.ClusterRole{
@@ -1103,23 +657,4 @@ func init() {
 		panic(err)
 	}
 
-	err = InfraSAs.addServiceAccount(
-		InfraGarbageCollectorControllerServiceAccountName,
-		authorizationapi.ClusterRole{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: GarbageCollectorControllerRoleName,
-			},
-			Rules: []authorizationapi.PolicyRule{
-				// Ability to delete resources and remove ownerRefs
-				{
-					APIGroups: []string{"*"},
-					Verbs:     sets.NewString("get", "list", "watch", "patch", "update", "delete"),
-					Resources: sets.NewString("*"),
-				},
-			},
-		},
-	)
-	if err != nil {
-		panic(err)
-	}
 }
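Reviewer note: none of the permissions deleted above are lost. Every openshift-infra controller role removed from this file has an upstream replacement in kube-controller-manager's bootstrap policy (the system:controller:* cluster roles, granted to per-controller service accounts in the kube-system namespace). The start_master.go changes later in this diff switch those controllers over to the upstream initializers, and the endpoint admission test below now authenticates as kube-system/endpoint-controller accordingly.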
diff --git a/pkg/cmd/server/kubernetes/master/master.go b/pkg/cmd/server/kubernetes/master/kube_controller_init.go
similarity index 53%
rename from pkg/cmd/server/kubernetes/master/master.go
rename to pkg/cmd/server/kubernetes/master/kube_controller_init.go
index b24d9ada9adc..acd951e50304 100644
--- a/pkg/cmd/server/kubernetes/master/master.go
+++ b/pkg/cmd/server/kubernetes/master/kube_controller_init.go
@@ -5,50 +5,25 @@ import (
 	"io/ioutil"
 	"net"
 	"os"
-	"time"
 
 	"github.com/golang/glog"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
 	utilwait "k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apiserver/pkg/storage"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/client-go/discovery"
-	"k8s.io/client-go/dynamic"
 	kv1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	kclientv1 "k8s.io/client-go/pkg/api/v1"
-	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/record"
 	kctrlmgr "k8s.io/kubernetes/cmd/kube-controller-manager/app"
 	kapi "k8s.io/kubernetes/pkg/api"
 	kapiv1 "k8s.io/kubernetes/pkg/api/v1"
-	"k8s.io/kubernetes/pkg/apis/certificates"
 	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1"
-	certcontroller "k8s.io/kubernetes/pkg/controller/certificates"
-	"k8s.io/kubernetes/pkg/controller/cronjob"
-	"k8s.io/kubernetes/pkg/controller/daemon"
-	"k8s.io/kubernetes/pkg/controller/deployment"
-	"k8s.io/kubernetes/pkg/controller/disruption"
-	endpointcontroller "k8s.io/kubernetes/pkg/controller/endpoint"
-	"k8s.io/kubernetes/pkg/controller/garbagecollector"
-	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
-	jobcontroller "k8s.io/kubernetes/pkg/controller/job"
-	namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
 	nodecontroller "k8s.io/kubernetes/pkg/controller/node"
-	podautoscalercontroller "k8s.io/kubernetes/pkg/controller/podautoscaler"
-	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
-	gccontroller "k8s.io/kubernetes/pkg/controller/podgc"
-	replicasetcontroller "k8s.io/kubernetes/pkg/controller/replicaset"
 	servicecontroller "k8s.io/kubernetes/pkg/controller/service"
-	statefulsetcontroller "k8s.io/kubernetes/pkg/controller/statefulset"
 	attachdetachcontroller "k8s.io/kubernetes/pkg/controller/volume/attachdetach"
 	persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
 	"k8s.io/kubernetes/pkg/features"
-	"k8s.io/kubernetes/pkg/master"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/aws_ebs"
 	"k8s.io/kubernetes/pkg/volume/azure_dd"
@@ -65,30 +40,11 @@ import (
 	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 	latestschedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
-
-	osclient "github.com/openshift/origin/pkg/client"
-	"github.com/openshift/origin/pkg/cmd/server/election"
 )
 
-func newMasterLeases(storage storage.Interface) election.Leases {
-	// leaseTTL is in seconds, i.e. 15 means 15 seconds; do NOT do 15*time.Second!
-	leaseTTL := uint64((master.DefaultEndpointReconcilerInterval + 5*time.Second) / time.Second) // add 5 seconds for wiggle room
-	return election.NewLeases(storage, "/masterleases/", leaseTTL)
-}
-
-// RunNamespaceController starts the Kubernetes Namespace Manager
-func (c *MasterConfig) RunNamespaceController(kubeClient kclientset.Interface, clientPool dynamic.ClientPool, namespaceInformer coreinformers.NamespaceInformer) {
-	// Find the list of namespaced resources via discovery that the namespace controller must manage
-	groupVersionResources, err := kubeClient.Discovery().ServerPreferredNamespacedResources()
-	if err != nil {
-		glog.Fatalf("Failed to get resources: %v", err)
-	}
-	gvrFn := func() ([]*metav1.APIResourceList, error) {
-		return groupVersionResources, nil
-	}
-	namespaceController := namespacecontroller.NewNamespaceController(kubeClient, clientPool, gvrFn, namespaceInformer, c.ControllerManager.NamespaceSyncPeriod.Duration, kapiv1.FinalizerKubernetes)
-	go namespaceController.Run(int(c.ControllerManager.ConcurrentNamespaceSyncs), utilwait.NeverStop)
-}
+// this file contains our special-cased controller initialization functions.
+// TODO refactor this into the same controller init function style. I suspect this means having the MasterConfig
+// produce a set of controller init functions. For now, don't mess with this to keep the diff sane.
 
 func (c *MasterConfig) RunPersistentVolumeController(client kclientset.Interface, namespace, recyclerImageName, recyclerServiceAccountName string) {
 	s := c.ControllerManager
@@ -196,90 +152,6 @@ func probeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration, na
 	return allPlugins
 }
 
-func (c *MasterConfig) RunReplicaSetController(client kclientset.Interface) {
-	controller := replicasetcontroller.NewReplicaSetController(
-		c.Informers.KubernetesInformers().Extensions().V1beta1().ReplicaSets(),
-		c.Informers.KubernetesInformers().Core().V1().Pods(),
-		client,
-		replicasetcontroller.BurstReplicas,
-	)
-	go controller.Run(int(c.ControllerManager.ConcurrentRSSyncs), utilwait.NeverStop)
-}
-
-func (c *MasterConfig) RunDeploymentController(client kclientset.Interface) {
-	controller := deployment.NewDeploymentController(
-		c.Informers.KubernetesInformers().Extensions().V1beta1().Deployments(),
-		c.Informers.KubernetesInformers().Extensions().V1beta1().ReplicaSets(),
-		c.Informers.KubernetesInformers().Core().V1().Pods(),
-		client,
-	)
-	go controller.Run(int(c.ControllerManager.ConcurrentDeploymentSyncs), utilwait.NeverStop)
-}
-
-// RunJobController starts the Kubernetes job controller sync loop
-func (c *MasterConfig) RunJobController(client kclientset.Interface) {
-	controller := jobcontroller.NewJobController(
-		c.Informers.KubernetesInformers().Core().V1().Pods(),
-		c.Informers.KubernetesInformers().Batch().V1().Jobs(),
-		client,
-	)
-	go controller.Run(int(c.ControllerManager.ConcurrentJobSyncs), utilwait.NeverStop)
-}
-
-// RunCronJobController starts the Kubernetes scheduled job controller sync loop
-func (c *MasterConfig) RunCronJobController(client kclientset.Interface) {
-	go cronjob.NewCronJobController(client).Run(utilwait.NeverStop)
-}
-
-// RunDisruptionBudgetController starts the Kubernetes disruption budget controller
-func (c *MasterConfig) RunDisruptionBudgetController(client kclientset.Interface) {
-	go disruption.NewDisruptionController(
-		c.Informers.KubernetesInformers().Core().V1().Pods(),
-		c.Informers.KubernetesInformers().Policy().V1beta1().PodDisruptionBudgets(),
-		c.Informers.KubernetesInformers().Core().V1().ReplicationControllers(),
-		c.Informers.KubernetesInformers().Extensions().V1beta1().ReplicaSets(),
-		c.Informers.KubernetesInformers().Extensions().V1beta1().Deployments(),
-		c.Informers.KubernetesInformers().Apps().V1beta1().StatefulSets(),
-		client,
-	).Run(utilwait.NeverStop)
-}
-
-// RunHPAController starts the Kubernetes hpa controller sync loop
-func (c *MasterConfig) RunHPAController(oc *osclient.Client, kc kclientset.Interface, heapsterNamespace string) {
-	delegatingScaleNamespacer := osclient.NewDelegatingScaleNamespacer(oc, kc.ExtensionsV1beta1())
-	metricsClient := metrics.NewHeapsterMetricsClient(kc, heapsterNamespace, "https", "heapster", "")
-	replicaCalc := podautoscalercontroller.NewReplicaCalculator(metricsClient, kc.Core())
-	podautoscaler := podautoscalercontroller.NewHorizontalController(
-		kv1core.New(kc.Core().RESTClient()),
-		delegatingScaleNamespacer,
-		kc.AutoscalingV1(),
-		replicaCalc,
-		c.Informers.KubernetesInformers().Autoscaling().V1().HorizontalPodAutoscalers(),
-		c.ControllerManager.HorizontalPodAutoscalerSyncPeriod.Duration,
-	)
-	go podautoscaler.Run(utilwait.NeverStop)
-}
-
-func (c *MasterConfig) RunDaemonSetsController(client kclientset.Interface) {
-	controller := daemon.NewDaemonSetsController(
-		c.Informers.KubernetesInformers().Extensions().V1beta1().DaemonSets(),
-		c.Informers.KubernetesInformers().Core().V1().Pods(),
-		c.Informers.KubernetesInformers().Core().V1().Nodes(),
-		client,
-	)
-	go controller.Run(int(c.ControllerManager.ConcurrentDaemonSetSyncs), utilwait.NeverStop)
-}
-
-// RunEndpointController starts the Kubernetes replication controller sync loop
-func (c *MasterConfig) RunEndpointController(client kclientset.Interface) {
-	endpoints := endpointcontroller.NewEndpointController(
-		c.Informers.KubernetesInformers().Core().V1().Pods(),
-		c.Informers.KubernetesInformers().Core().V1().Services(),
-		client,
-	)
-	go endpoints.Run(int(c.ControllerManager.ConcurrentEndpointSyncs), utilwait.NeverStop)
-}
-
 // RunScheduler starts the Kubernetes scheduler
 func (c *MasterConfig) RunScheduler() {
 	config, err := c.createSchedulerConfig()
@@ -294,53 +166,6 @@ func (c *MasterConfig) RunScheduler() {
 	go s.Run()
 }
 
-// RunGCController handles deletion of terminated pods.
-func (c *MasterConfig) RunGCController(client kclientset.Interface) {
-	if c.ControllerManager.TerminatedPodGCThreshold > 0 {
-		gcController := gccontroller.NewPodGC(
-			client,
-			c.Informers.KubernetesInformers().Core().V1().Pods(),
-			int(c.ControllerManager.TerminatedPodGCThreshold),
-		)
-		go gcController.Run(utilwait.NeverStop)
-	}
-}
-
-// RunGarbageCollectorController starts generic garbage collection for the cluster.
-func (c *MasterConfig) RunGarbageCollectorController(client *osclient.Client, config *restclient.Config) {
-	if !c.ControllerManager.EnableGarbageCollector {
-		return
-	}
-
-	preferredResources, err := client.Discovery().ServerPreferredResources()
-	if err != nil {
-		glog.Fatalf("failed to get supported resources from server: %v", err)
-	}
-
-	deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete"}}, preferredResources)
-	deletableGroupVersionResources, err := discovery.GroupVersionResources(deletableResources)
-	if err != nil {
-		glog.Fatalf("Failed to parse resources from server: %v", err)
-	}
-
-	config = restclient.AddUserAgent(config, "generic-garbage-collector")
-	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
-	// TODO: should use a dynamic RESTMapper built from the discovery results.
-	restMapper := kapi.Registry.RESTMapper()
-	// TODO: needs to take GVR
-	metaOnlyClientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
-	config.ContentConfig.NegotiatedSerializer = nil
-	// TODO: needs to take GVR
-	clientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
-	garbageCollector, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, restMapper, deletableGroupVersionResources)
-	if err != nil {
-		glog.Fatalf("Failed to start the garbage collector: %v", err)
-	}
-
-	workers := int(c.ControllerManager.ConcurrentGCSyncs)
-	go garbageCollector.Run(workers, utilwait.NeverStop)
-}
-
 // RunNodeController starts the node controller
 // TODO: handle node CIDR and route allocation
 func (c *MasterConfig) RunNodeController() {
@@ -402,17 +227,6 @@ func (c *MasterConfig) RunServiceLoadBalancerController(client kclientset.Interf
 	}
 }
 
-// RunStatefulSetController starts the StatefulSet controller
-func (c *MasterConfig) RunStatefulSetController(client kclientset.Interface) {
-	ps := statefulsetcontroller.NewStatefulSetController(
-		c.Informers.KubernetesInformers().Core().V1().Pods(),
-		c.Informers.KubernetesInformers().Apps().V1beta1().StatefulSets(),
-		c.Informers.KubernetesInformers().Core().V1().PersistentVolumeClaims(),
-		client,
-	)
-	go ps.Run(1, utilwait.NeverStop)
-}
-
 func (c *MasterConfig) createSchedulerConfig() (*scheduler.Config, error) {
 	var policy schedulerapi.Policy
 	var configData []byte
@@ -446,32 +260,3 @@ func (c *MasterConfig) createSchedulerConfig() (*scheduler.Config, error) {
 	// if the config file isn't provided, use the default provider
 	return configFactory.CreateFromProvider(factory.DefaultProvider)
 }
-
-type noAutoApproval struct{}
-
-func (noAutoApproval) AutoApprove(csr *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) {
-	return csr, nil
-}
-
-func (c *MasterConfig) RunCertificateSigningController(clientset kclientset.Interface) {
-	if len(c.ControllerManager.ClusterSigningCertFile) == 0 || len(c.ControllerManager.ClusterSigningKeyFile) == 0 {
-		glog.V(2).Infof("Certificate signer controller will not start - no signing key or cert set")
-		return
-	}
-
-	signer, err := certcontroller.NewCFSSLSigner(c.ControllerManager.ClusterSigningCertFile, c.ControllerManager.ClusterSigningKeyFile)
-	if err != nil {
-		glog.Fatalf("Failed to start certificate controller: %v", err)
-	}
-
-	certController, err := certcontroller.NewCertificateController(
-		clientset,
-		c.Informers.KubernetesInformers().Certificates().V1beta1().CertificateSigningRequests(),
-		signer,
-		certcontroller.NewGroupApprover(c.ControllerManager.ApproveAllKubeletCSRsForGroup),
-	)
-	if err != nil {
-		glog.Fatalf("Failed to start certificate controller: %v", err)
-	}
-	go certController.Run(1, utilwait.NeverStop)
-}
diff --git a/pkg/cmd/server/kubernetes/master/master_config.go b/pkg/cmd/server/kubernetes/master/master_config.go
index 2d645993b492..dac878a8c7bd 100644
--- a/pkg/cmd/server/kubernetes/master/master_config.go
+++ b/pkg/cmd/server/kubernetes/master/master_config.go
@@ -37,6 +37,7 @@ import (
 	apiserveroptions "k8s.io/apiserver/pkg/server/options"
 	genericoptions "k8s.io/apiserver/pkg/server/options"
 	apiserverstorage "k8s.io/apiserver/pkg/server/storage"
+	"k8s.io/apiserver/pkg/storage"
 	storagefactory "k8s.io/apiserver/pkg/storage/storagebackend/factory"
 	utilflag "k8s.io/apiserver/pkg/util/flag"
 	kapiserveroptions "k8s.io/kubernetes/cmd/kube-apiserver/app/options"
@@ -741,3 +742,9 @@ func readCAorNil(file string) ([]byte, error) {
 	}
 	return ioutil.ReadFile(file)
 }
+
+func newMasterLeases(storage storage.Interface) election.Leases {
+	// leaseTTL is in seconds, i.e. 15 means 15 seconds; do NOT do 15*time.Second!
+	leaseTTL := uint64((master.DefaultEndpointReconcilerInterval + 5*time.Second) / time.Second) // add 5 seconds for wiggle room
+	return election.NewLeases(storage, "/masterleases/", leaseTTL)
+}
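Reviewer note on the relocated newMasterLeases: the "do NOT do 15*time.Second!" warning is worth keeping, because election.NewLeases takes its TTL as a plain number of seconds while a time.Duration is a nanosecond count. A standalone sketch of the trap (not code from this PR; the 15-second interval is made up for illustration):

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		reconcilerInterval := 15 * time.Second

		// correct: dividing two Durations yields a unitless count of seconds
		ttl := uint64((reconcilerInterval + 5*time.Second) / time.Second)
		fmt.Println(ttl) // 20

		// wrong: converting a raw Duration gives nanoseconds, i.e. a TTL of 20 billion "seconds"
		wrong := uint64(reconcilerInterval + 5*time.Second)
		fmt.Println(wrong) // 20000000000
	}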
diff --git a/pkg/cmd/server/start/start_master.go b/pkg/cmd/server/start/start_master.go
index 6e4b04cb2aed..15b4c164ec0a 100644
--- a/pkg/cmd/server/start/start_master.go
+++ b/pkg/cmd/server/start/start_master.go
@@ -16,19 +16,11 @@ import (
 	"github.com/spf13/cobra"
 
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilwait "k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/dynamic"
 	restclient "k8s.io/client-go/rest"
 	kctrlmgr "k8s.io/kubernetes/cmd/kube-controller-manager/app"
 	cmapp "k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
-	kapi "k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/apps"
-	"k8s.io/kubernetes/pkg/apis/autoscaling"
-	"k8s.io/kubernetes/pkg/apis/batch"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-	"k8s.io/kubernetes/pkg/apis/policy"
 	"k8s.io/kubernetes/pkg/capabilities"
 	"k8s.io/kubernetes/pkg/controller"
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
@@ -606,205 +598,106 @@ func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) erro
 	oc.RunServiceAccountPullSecretsControllers()
 	oc.RunSecurityAllocationController()
 
-	if kc != nil {
-		_, _, _, rsClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraReplicaSetControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for replication controller: %v", err)
-		}
-		_, _, _, deploymentClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraDeploymentControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for deployment controller: %v", err)
-		}
-
-		// TODO there has to be a better way to do this!
-		// Make a copy of the client config because we need to modify it
-		jobClientConfig := oc.PrivilegedLoopbackClientConfig
-		jobClientConfig.ContentConfig.GroupVersion = &schema.GroupVersion{Group: "batch", Version: "v2alpha1"}
-		_, _, _, jobClient, err := oc.GetServiceAccountClientsWithConfig(bootstrappolicy.InfraJobControllerServiceAccountName, jobClientConfig)
-		if err != nil {
-			glog.Fatalf("Could not get client for job controller: %v", err)
-		}
-
-		_, hpaOClient, _, hpaKClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraHPAControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for HPA controller: %v", err)
-		}
-
-		_, _, _, binderClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeBinderControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for persistent volume binder controller: %v", err)
-		}
-
-		_, _, _, attachDetachControllerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeAttachDetachControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for attach detach controller: %v", err)
-		}
-
-		_, _, _, daemonSetClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraDaemonSetControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for daemonset controller: %v", err)
-		}
-
-		_, _, _, disruptionClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraDisruptionControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for disruption budget controller: %v", err)
-		}
-
-		_, _, _, gcClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraGCControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for pod gc controller: %v", err)
-		}
+	_, _, _, binderClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeBinderControllerServiceAccountName)
+	if err != nil {
+		glog.Fatalf("Could not get client for persistent volume binder controller: %v", err)
+	}
 
-		_, _, _, serviceLoadBalancerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraServiceLoadBalancerControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for pod gc controller: %v", err)
-		}
+	_, _, _, attachDetachControllerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeAttachDetachControllerServiceAccountName)
+	if err != nil {
+		glog.Fatalf("Could not get client for attach detach controller: %v", err)
+	}
 
-		_, _, _, statefulSetClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraStatefulSetControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for pet set controller: %v", err)
-		}
+	_, _, _, serviceLoadBalancerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraServiceLoadBalancerControllerServiceAccountName)
+	if err != nil {
+		glog.Fatalf("Could not get client for service load balancer controller: %v", err)
+	}
 
-		_, _, _, certificateSigningClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraCertificateSigningControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for disruption budget controller: %v", err)
-		}
+	rootClientBuilder := controller.SimpleControllerClientBuilder{
+		ClientConfig: &oc.PrivilegedLoopbackClientConfig,
+	}
+	saClientBuilder := controller.SAControllerClientBuilder{
+		ClientConfig:         restclient.AnonymousClientConfig(&oc.PrivilegedLoopbackClientConfig),
+		CoreClient:           oc.PrivilegedLoopbackKubernetesClientsetExternal.Core(),
+		AuthenticationClient: oc.PrivilegedLoopbackKubernetesClientsetExternal.Authentication(),
+		Namespace:            "kube-system",
+	}
+	availableResources, err := kctrlmgr.GetAvailableResources(rootClientBuilder)
+	if err != nil {
+		return err
+	}
 
-		namespaceControllerClientConfig, _, _, namespaceControllerKubeClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraNamespaceControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for namespace controller: %v", err)
-		}
-		// TODO: should use a dynamic RESTMapper built from the discovery results.
-		restMapper := kapi.Registry.RESTMapper()
-		namespaceControllerClientPool := dynamic.NewClientPool(namespaceControllerClientConfig, restMapper, dynamic.LegacyAPIPathResolverFunc)
+	controllerContext := kctrlmgr.ControllerContext{
+		ClientBuilder:      saClientBuilder,
+		InformerFactory:    oc.Informers.KubernetesInformers(),
+		Options:            *controllerManagerOptions,
+		AvailableResources: availableResources,
+		Stop:               utilwait.NeverStop,
+	}
+	controllerInitializers := kctrlmgr.NewControllerInitializers()
+
+	// TODO I think this should become a blacklist kept in sync during rebases with a unit test.
+	allowedControllers := sets.NewString(
+		"endpoint",
+		"replicationcontroller",
+		"podgc",
+		"namespace",
+		"garbagecollector",
+		"daemonset",
+		"job",
+		"deployment",
+		"replicaset",
+		"horizontalpodautoscaling",
+		"disruption",
+		"statefulset",
+		"cronjob",
+		"certificatesigningrequests",
+
+		// not used in openshift. Yet?
+		// "ttl",
+		// "bootstrapsigner",
+		// "tokencleaner",
+
+		// These controllers need to have their own init functions until we extend the upstream controller config.
+		// TODO we have a different set of managed names which need to be plumbed through.
+		// "serviceaccount",
+		// TODO this controller takes different evaluators.
+		// "resourcequota",
+	)
 
-		_, _, _, endpointControllerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraEndpointControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for endpoint controller: %v", err)
+	for controllerName, initFn := range controllerInitializers {
+		// TODO remove this. Only call one to start to prove the principle
+		if !allowedControllers.Has(controllerName) {
+			glog.Warningf("%q is skipped", controllerName)
+			continue
 		}
-		garbageCollectorControllerConfig, garbageCollectorControllerClient, _, _, err := oc.GetServiceAccountClients(bootstrappolicy.InfraGarbageCollectorControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for garbage collector controller: %v", err)
+		if !controllerContext.IsControllerEnabled(controllerName) {
+			glog.Warningf("%q is disabled", controllerName)
+			continue
 		}
 
-		rootClientBuilder := controller.SimpleControllerClientBuilder{
-			ClientConfig: &oc.PrivilegedLoopbackClientConfig,
-		}
-		saClientBuilder := controller.SAControllerClientBuilder{
-			ClientConfig:         restclient.AnonymousClientConfig(&oc.PrivilegedLoopbackClientConfig),
-			CoreClient:           oc.PrivilegedLoopbackKubernetesClientsetExternal.Core(),
-			AuthenticationClient: oc.PrivilegedLoopbackKubernetesClientsetExternal.Authentication(),
-			Namespace:            "kube-system",
-		}
-		availableResources, err := kctrlmgr.GetAvailableResources(rootClientBuilder)
+		glog.V(1).Infof("Starting %q", controllerName)
+		started, err := initFn(controllerContext)
 		if err != nil {
+			glog.Errorf("Error starting %q", controllerName)
 			return err
 		}
-
-		controllerContext := kctrlmgr.ControllerContext{
-			ClientBuilder:      saClientBuilder,
-			InformerFactory:    oc.Informers.KubernetesInformers(),
-			Options:            *controllerManagerOptions,
-			AvailableResources: availableResources,
-			Stop:               utilwait.NeverStop,
-		}
-		controllerInitializers := kctrlmgr.NewControllerInitializers()
-
-		// TODO remove this.  Using it now to control the migration
-		allowedControllers := sets.NewString(
-			// "endpoint",
-			"replicationcontroller",
-			// "podgc",
-			// "resourcequota",
-			// "namespace",
-			// "serviceaccount",
-			// "garbagecollector",
-			// "daemonset",
-			// "job",
-			// "deployment",
-			// "replicaset",
-			// "horizontalpodautoscaling",
-			// "disruption",
-			// "statefuleset",
-			// "cronjob",
-			// "certificatesigningrequests",
-			// "ttl",
-			// "bootstrapsigner",
-			// "tokencleaner",
-		)
-
-		for controllerName, initFn := range controllerInitializers {
-			// TODO remove this.  Only call one to start to prove the principle
-			if !allowedControllers.Has(controllerName) {
-				glog.Warningf("%q is skipped", controllerName)
-				continue
-			}
-			if !controllerContext.IsControllerEnabled(controllerName) {
-				glog.Warningf("%q is disabled", controllerName)
-				continue
-			}
-
-			glog.V(1).Infof("Starting %q", controllerName)
-			started, err := initFn(controllerContext)
-			if err != nil {
-				glog.Errorf("Error starting %q", controllerName)
-				return err
-			}
-			if !started {
-				glog.Warningf("Skipping %q", controllerName)
-				continue
-			}
-			glog.Infof("Started %q", controllerName)
-		}
-
-		// no special order
-		kc.RunNodeController()
-		kc.RunScheduler()
-		kc.RunReplicaSetController(rsClient)
-		kc.RunDeploymentController(deploymentClient)
-		kc.RunGarbageCollectorController(garbageCollectorControllerClient, garbageCollectorControllerConfig)
-
-		extensionsEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, extensions.GroupName)) > 0
-
-		batchEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, batch.GroupName)) > 0
-		if extensionsEnabled || batchEnabled {
-			kc.RunJobController(jobClient)
-		}
-		if batchEnabled {
-			kc.RunCronJobController(jobClient)
-		}
-		// TODO: enable this check once the HPA controller can use the autoscaling API if the extensions API is disabled
-		autoscalingEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, autoscaling.GroupName)) > 0
-		if extensionsEnabled || autoscalingEnabled {
-			kc.RunHPAController(hpaOClient, hpaKClient, oc.Options.PolicyConfig.OpenShiftInfrastructureNamespace)
+		if !started {
+			glog.Warningf("Skipping %q", controllerName)
+			continue
 		}
-		if extensionsEnabled {
-			kc.RunDaemonSetsController(daemonSetClient)
-		}
-
-		policyEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, policy.GroupName)) > 0
-		if policyEnabled {
-			kc.RunDisruptionBudgetController(disruptionClient)
-		}
-
-		kc.RunEndpointController(endpointControllerClient)
-		kc.RunNamespaceController(namespaceControllerKubeClient, namespaceControllerClientPool, oc.Informers.KubernetesInformers().Core().V1().Namespaces())
-		kc.RunPersistentVolumeController(binderClient, oc.Options.PolicyConfig.OpenShiftInfrastructureNamespace, oc.ImageFor("recycler"), bootstrappolicy.InfraPersistentVolumeRecyclerControllerServiceAccountName)
-		kc.RunPersistentVolumeAttachDetachController(attachDetachControllerClient)
-		kc.RunGCController(gcClient)
-
-		kc.RunServiceLoadBalancerController(serviceLoadBalancerClient)
-
-		kc.RunCertificateSigningController(certificateSigningClient)
+		glog.Infof("Started %q", controllerName)
+	}
 
-		appsEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, apps.GroupName)) > 0
-		if appsEnabled {
-			kc.RunStatefulSetController(statefulSetClient)
-		}
+	// These controllers are special-cased upstream. We'll need custom init functions for them downstream.
+	// As we make them less special, we should revisit this.
+	kc.RunNodeController()
+	kc.RunScheduler()
+	kc.RunPersistentVolumeController(binderClient, oc.Options.PolicyConfig.OpenShiftInfrastructureNamespace, oc.ImageFor("recycler"), bootstrappolicy.InfraPersistentVolumeRecyclerControllerServiceAccountName)
+	kc.RunPersistentVolumeAttachDetachController(attachDetachControllerClient)
+	kc.RunServiceLoadBalancerController(serviceLoadBalancerClient)
 
-		glog.Infof("Started Kubernetes Controllers")
-	}
+	glog.Infof("Started Kubernetes Controllers")
 
 	// no special order
 	if configapi.IsBuildEnabled(&oc.Options) {
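Reviewer note: the loop added above is the core of this change. Upstream kube-controller-manager (1.6-era) publishes its controllers as a map of named init functions, each taking a ControllerContext and reporting whether it started. Roughly, the upstream shape is as follows (paraphrased from k8s.io/kubernetes/cmd/kube-controller-manager/app; names beyond InitFunc and NewControllerInitializers should be treated as illustrative):

	// paraphrased upstream shape, for review context only
	type InitFunc func(ctx ControllerContext) (started bool, err error)

	func NewControllerInitializers() map[string]InitFunc {
		controllers := map[string]InitFunc{}
		controllers["endpoint"] = startEndpointController
		controllers["replicationcontroller"] = startReplicationController
		// ... one entry per name in the allowedControllers set above
		return controllers
	}

Because each init function pulls its client from ControllerContext.ClientBuilder — here the SAControllerClientBuilder, which mints tokens for per-controller service accounts in kube-system — the per-controller GetServiceAccountClients plumbing deleted above is no longer needed.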
"system:cluster-readers", "system:serviceaccounts:hammer-project"), } if (actualResponse.Namespace != expectedResponse.Namespace) || diff --git a/test/integration/endpoint_admission_test.go b/test/integration/endpoint_admission_test.go index 47fdb81b524a..4d2b4f796fb2 100644 --- a/test/integration/endpoint_admission_test.go +++ b/test/integration/endpoint_admission_test.go @@ -7,7 +7,6 @@ import ( kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" configapi "github.com/openshift/origin/pkg/cmd/server/api" - "github.com/openshift/origin/pkg/cmd/server/bootstrappolicy" serviceadmit "github.com/openshift/origin/pkg/service/admission" testutil "github.com/openshift/origin/test/util" testserver "github.com/openshift/origin/test/util/server" @@ -90,7 +89,7 @@ func TestEndpointAdmission(t *testing.T) { testOne(t, clusterAdminKubeClient, "default", "external", true) // Endpoint controller service account - _, serviceAccountClient, _, err := testutil.GetClientForServiceAccount(clusterAdminKubeClient, *clientConfig, bootstrappolicy.DefaultOpenShiftInfraNamespace, bootstrappolicy.InfraEndpointControllerServiceAccountName) + _, serviceAccountClient, _, err := testutil.GetClientForServiceAccount(clusterAdminKubeClient, *clientConfig, "kube-system", "endpoint-controller") if err != nil { t.Fatalf("error getting endpoint controller service account: %v", err) } diff --git a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml index 5b90613f63e9..aac6e41d8e15 100644 --- a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml +++ b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml @@ -2806,441 +2806,121 @@ items: - apiVersion: v1 kind: ClusterRole metadata: - annotations: - authorization.openshift.io/system-only: "true" creationTimestamp: null - name: system:build-controller - rules: - - apiGroups: - - "" - attributeRestrictions: null - resources: - - builds - verbs: - - get - - list - - watch - - apiGroups: - - "" - attributeRestrictions: null - resources: - - builds - verbs: - - update - - apiGroups: - - build.openshift.io - - "" - attributeRestrictions: null - resources: - - builds/custom - - builds/docker - - builds/jenkinspipeline - - builds/source - verbs: - - create - - apiGroups: - - "" - attributeRestrictions: null - resources: - - imagestreams - verbs: - - get - - apiGroups: - - "" - attributeRestrictions: null - resources: - - pods - verbs: - - create - - delete - - get - - list - - apiGroups: - - "" - attributeRestrictions: null - resources: - - events - verbs: - - create - - patch - - update + name: system:endpoint-controller + rules: [] - apiVersion: v1 kind: ClusterRole metadata: - annotations: - authorization.openshift.io/system-only: "true" creationTimestamp: null - name: system:certificate-signing-controller - rules: - - apiGroups: - - certificates.k8s.io - attributeRestrictions: null - resources: - - certificatesigningrequests - verbs: - - list - - watch - - apiGroups: - - certificates.k8s.io - attributeRestrictions: null - resources: - - certificatesigningrequests/approval - - certificatesigningrequests/status - verbs: - - update + name: system:replicaset-controller + rules: [] - apiVersion: v1 kind: ClusterRole metadata: - annotations: - authorization.openshift.io/system-only: "true" creationTimestamp: null - name: system:daemonset-controller - rules: - - apiGroups: - - extensions - attributeRestrictions: null - resources: - - 
diff --git a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml
index 5b90613f63e9..aac6e41d8e15 100644
--- a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml
+++ b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml
@@ -2806,441 +2806,121 @@ items:
 - apiVersion: v1
   kind: ClusterRole
   metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
     creationTimestamp: null
-    name: system:build-controller
-  rules:
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - builds
-    verbs:
-    - get
-    - list
-    - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - builds
-    verbs:
-    - update
-  - apiGroups:
-    - build.openshift.io
-    - ""
-    attributeRestrictions: null
-    resources:
-    - builds/custom
-    - builds/docker
-    - builds/jenkinspipeline
-    - builds/source
-    verbs:
-    - create
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - imagestreams
-    verbs:
-    - get
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - create
-    - delete
-    - get
-    - list
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - events
-    verbs:
-    - create
-    - patch
-    - update
+    name: system:endpoint-controller
+  rules: []
 - apiVersion: v1
   kind: ClusterRole
   metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
     creationTimestamp: null
-    name: system:certificate-signing-controller
-  rules:
-  - apiGroups:
-    - certificates.k8s.io
-    attributeRestrictions: null
-    resources:
-    - certificatesigningrequests
-    verbs:
-    - list
-    - watch
-  - apiGroups:
-    - certificates.k8s.io
-    attributeRestrictions: null
-    resources:
-    - certificatesigningrequests/approval
-    - certificatesigningrequests/status
-    verbs:
-    - update
+    name: system:replicaset-controller
+  rules: []
 - apiVersion: v1
   kind: ClusterRole
   metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
     creationTimestamp: null
-    name: system:daemonset-controller
-  rules:
-  - apiGroups:
-    - extensions
-    attributeRestrictions: null
-    resources:
-    - daemonsets
-    verbs:
-    - get
-    - list
-    - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - list
-    - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - nodes
-    verbs:
-    - list
-    - watch
-  - apiGroups:
-    - extensions
-    attributeRestrictions: null
-    resources:
-    - daemonsets/status
-    verbs:
-    - update
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - create
-    - delete
-    - patch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods/binding
-    verbs:
-    - create
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - events
-    verbs:
-    - create
-    - patch
-    - update
+    name: system:garbage-collector-controller
+  rules: []
 - apiVersion: v1
   kind: ClusterRole
   metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
     creationTimestamp: null
-    name: system:deployment-controller
-  rules:
-  - apiGroups:
-    - extensions
-    attributeRestrictions: null
-    resources:
-    - deployments
-    verbs:
-    - get
-    - list
-    - update
-    - watch
-  - apiGroups:
-    - extensions
-    attributeRestrictions: null
-    resources:
-    - deployments/status
-    verbs:
-    - update
-  - apiGroups:
-    - extensions
-    attributeRestrictions: null
-    resources:
-    - replicasets
-    verbs:
-    - create
-    - delete
-    - get
-    - list
-    - patch
-    - update
-    - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - get
-    - list
-    - update
-    - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - events
-    verbs:
-    - create
-    - patch
-    - update
+    name: system:job-controller
+  rules: []
 - apiVersion: v1
   kind: ClusterRole
   metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
     creationTimestamp: null
-    name: system:deploymentconfig-controller
-  rules:
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - replicationcontrollers
-    verbs:
-    - list
-    - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - replicationcontrollers
-    verbs:
-    - get
-    - update
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - create
-    - delete
-    - get
-    - list
-    - update
-    - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - events
-    verbs:
-    - create
-    - patch
-    - update
+    name: system:hpa-controller
+  rules: []
+- apiVersion: v1
+  kind: ClusterRole
+  metadata:
+    creationTimestamp: null
+    name: system:daemonset-controller
+  rules: []
 - apiVersion: v1
   kind: ClusterRole
   metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
     creationTimestamp: null
     name: system:disruption-controller
-  rules:
-  - apiGroups:
-    - extensions
-    attributeRestrictions: null
-    resources:
-    - deployments
-    verbs:
-    - list
-    - watch
-  - apiGroups:
-    - extensions
-    attributeRestrictions: null
-    resources:
-    - replicasets
-    verbs:
-    - list
-    - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - replicationcontrollers
-    verbs:
-    - list
-    - watch
-  - apiGroups:
-    - apps
-    attributeRestrictions: null
-    resources:
-    - statefulsets
-    verbs:
-    - list
-    - watch
-  - apiGroups:
-    - policy
-    attributeRestrictions: null
-    resources:
-    - poddisruptionbudgets
-    verbs:
-    - get
-    - list
-    - watch
-  - apiGroups:
-    - policy
-    attributeRestrictions: null
-    resources:
-    - poddisruptionbudgets/status
-    verbs:
-    - update
+  rules: []
 - apiVersion: v1
   kind: ClusterRole
   metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
     creationTimestamp: null
-    name: system:endpoint-controller
-  rules:
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    - services
-    verbs:
-    - get
-    - list
-    - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - endpoints
-    verbs:
-    - create
-    - delete
-    - get
-    - list
-    - update
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - endpoints/restricted
-    verbs:
-    - create
+    name: system:namespace-controller
+  rules: []
+- apiVersion: v1
+  kind: ClusterRole
+  metadata:
+    creationTimestamp: null
+    name: system:gc-controller
+  rules: []
+- apiVersion: v1
+  kind: ClusterRole
+  metadata:
+    creationTimestamp: null
+    name: system:certificate-signing-controller
+  rules: []
 - apiVersion: v1
   kind: ClusterRole
   metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
     creationTimestamp: null
-    name: system:garbage-collector-controller
-  rules:
-  - apiGroups:
-    - '*'
-    attributeRestrictions: null
-    resources:
-    - '*'
-    verbs:
-    - delete
-    - get
-    - list
-    - patch
-    - update
-    - watch
+    name: system:statefulset-controller
+  rules: []
 - apiVersion: v1
   kind: ClusterRole
   metadata:
     annotations:
       authorization.openshift.io/system-only: "true"
     creationTimestamp: null
-    name: system:gc-controller
+    name: system:build-controller
   rules:
   - apiGroups:
     - ""
     attributeRestrictions: null
     resources:
-    - pods
+    - builds
     verbs:
+    - get
     - list
     - watch
   - apiGroups:
     - ""
     attributeRestrictions: null
     resources:
-    - nodes
+    - builds
     verbs:
-    - list
-    - watch
+    - update
   - apiGroups:
+    - build.openshift.io
     - ""
     attributeRestrictions: null
     resources:
-    - pods
-    verbs:
-    - delete
-- apiVersion: v1
-  kind: ClusterRole
-  metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
-    creationTimestamp: null
-    name: system:hpa-controller
-  rules:
-  - apiGroups:
-    - extensions
-    - autoscaling
-    attributeRestrictions: null
-    resources:
-    - horizontalpodautoscalers
-    verbs:
-    - get
-    - list
-    - watch
-  - apiGroups:
-    - extensions
-    - autoscaling
-    attributeRestrictions: null
-    resources:
-    - horizontalpodautoscalers/status
+    - builds/custom
+    - builds/docker
+    - builds/jenkinspipeline
+    - builds/source
     verbs:
-    - update
+    - create
   - apiGroups:
-    - extensions
     - ""
     attributeRestrictions: null
     resources:
-    - replicationcontrollers/scale
+    - imagestreams
     verbs:
     - get
-    - update
   - apiGroups:
     - ""
     attributeRestrictions: null
     resources:
-    - deploymentconfigs/scale
+    - pods
     verbs:
+    - create
+    - delete
     - get
-    - update
+    - list
   - apiGroups:
     - ""
     attributeRestrictions: null
     resources:
     - events
     verbs:
@@ -3250,78 +2930,54 @@ items:
     - create
     - patch
     - update
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - list
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resourceNames:
-    - 'https:heapster:'
-    resources:
-    - services
-    verbs:
-    - proxy
 - apiVersion: v1
   kind: ClusterRole
   metadata:
     annotations:
       authorization.openshift.io/system-only: "true"
     creationTimestamp: null
-    name: system:job-controller
+    name: system:deployment-controller
   rules:
   - apiGroups:
     - extensions
-    - batch
     attributeRestrictions: null
     resources:
-    - cronjobs
-    - jobs
-    - scheduledjobs
+    - deployments
     verbs:
     - get
     - list
+    - update
     - watch
   - apiGroups:
     - extensions
-    - batch
     attributeRestrictions: null
     resources:
-    - cronjobs/status
-    - jobs/status
-    - scheduledjobs/status
+    - deployments/status
     verbs:
     - update
   - apiGroups:
     - extensions
-    - batch
     attributeRestrictions: null
     resources:
-    - jobs
+    - replicasets
     verbs:
     - create
     - delete
+    - get
+    - list
+    - patch
     - update
+    - watch
   - apiGroups:
     - ""
     attributeRestrictions: null
     resources:
     - pods
     verbs:
+    - get
     - list
+    - update
     - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - create
-    - delete
   - apiGroups:
     - ""
     attributeRestrictions: null
@@ -3337,36 +2993,45 @@ items:
     annotations:
       authorization.openshift.io/system-only: "true"
     creationTimestamp: null
-    name: system:namespace-controller
+    name: system:deploymentconfig-controller
   rules:
   - apiGroups:
     - ""
     attributeRestrictions: null
     resources:
-    - namespaces
+    - replicationcontrollers
     verbs:
-    - delete
-    - get
     - list
     - watch
   - apiGroups:
     - ""
     attributeRestrictions: null
     resources:
-    - namespaces/finalize
-    - namespaces/status
+    - replicationcontrollers
    verbs:
+    - get
     - update
   - apiGroups:
-    - '*'
+    - ""
     attributeRestrictions: null
     resources:
-    - '*'
+    - pods
     verbs:
+    - create
     - delete
-    - deletecollection
     - get
     - list
+    - update
+    - watch
+  - apiGroups:
+    - ""
+    attributeRestrictions: null
+    resources:
+    - events
+    verbs:
+    - create
+    - patch
+    - update
 - apiVersion: v1
   kind: ClusterRole
   metadata:
@@ -3703,50 +3368,6 @@ items:
     - create
     - patch
     - update
-- apiVersion: v1
-  kind: ClusterRole
-  metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
-    creationTimestamp: null
-    name: system:replicaset-controller
-  rules:
-  - apiGroups:
-    - extensions
-    attributeRestrictions: null
-    resources:
-    - replicasets
-    verbs:
-    - get
-    - list
-    - update
-    - watch
-  - apiGroups:
-    - extensions
-    attributeRestrictions: null
-    resources:
-    - replicasets/status
-    verbs:
-    - update
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - create
-    - delete
-    - list
-    - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - events
-    verbs:
-    - create
-    - patch
-    - update
 - apiVersion: v1
   kind: ClusterRole
   metadata:
@@ -3861,72 +3482,6 @@ items:
     - list
     - update
     - watch
-- apiVersion: v1
-  kind: ClusterRole
-  metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
-    creationTimestamp: null
-    name: system:statefulset-controller
-  rules:
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - list
-    - watch
-  - apiGroups:
-    - apps
-    attributeRestrictions: null
-    resources:
-    - statefulsets
-    verbs:
-    - list
-    - watch
-  - apiGroups:
-    - apps
-    attributeRestrictions: null
-    resources:
-    - statefulsets
-    verbs:
-    - get
-  - apiGroups:
-    - apps
-    attributeRestrictions: null
-    resources:
-    - statefulsets/status
-    verbs:
-    - update
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - create
-    - delete
-    - get
-    - patch
-    - update
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - persistentvolumeclaims
-    verbs:
-    - create
-    - get
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - events
-    verbs:
-    - create
-    - patch
-    - update
 - apiVersion: v1
   kind: ClusterRole
   metadata: