Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

deploy: fix the owner reference kind to be rc #13996

Merged
merged 9 commits into from
May 22, 2017
19 changes: 15 additions & 4 deletions pkg/authorization/authorizer/subjects_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,10 +35,21 @@ func TestSubjects(t *testing.T) {
Resource: "pods",
},
expectedUsers: sets.NewString("Anna", "ClusterAdmin", "Ellen", "Valerie",
"system:serviceaccount:adze:second", "system:serviceaccount:foo:default", "system:serviceaccount:other:first",
"system:serviceaccount:kube-system:deployment-controller", "system:serviceaccount:kube-system:endpoint-controller", "system:serviceaccount:kube-system:generic-garbage-collector",
"system:serviceaccount:kube-system:namespace-controller", "system:serviceaccount:kube-system:persistent-volume-binder", "system:serviceaccount:kube-system:statefulset-controller",
"system:admin", "system:kube-scheduler"),
"system:serviceaccount:adze:second",
"system:serviceaccount:foo:default",
"system:serviceaccount:other:first",
"system:serviceaccount:kube-system:deployment-controller",
"system:serviceaccount:kube-system:endpoint-controller",
"system:serviceaccount:kube-system:generic-garbage-collector",
"system:serviceaccount:kube-system:namespace-controller",
"system:serviceaccount:kube-system:persistent-volume-binder",
"system:serviceaccount:kube-system:statefulset-controller",
"system:admin",
"system:kube-scheduler",
"system:serviceaccount:openshift-infra:build-controller",
"system:serviceaccount:openshift-infra:deployer-controller",
"system:serviceaccount:openshift-infra:deploymentconfig-controller",
),
expectedGroups: sets.NewString("RootUsers", "system:cluster-admins", "system:cluster-readers", "system:masters", "system:nodes"),
}
test.clusterPolicies = newDefaultClusterPolicies()
Expand Down
106 changes: 106 additions & 0 deletions pkg/cmd/server/bootstrappolicy/controller_policy.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
package bootstrappolicy

import (
"strings"

"github.com/golang/glog"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
)

const saRolePrefix = "system:openshift:controller:"

var (
// controllerRoles is a slice of roles used for controllers
controllerRoles = []rbac.ClusterRole{}
// controllerRoleBindings is a slice of roles used for controllers
controllerRoleBindings = []rbac.ClusterRoleBinding{}
)

// addControllerRole registers a cluster role for an openshift controller and
// the cluster role binding that grants it to the controller's service account
// in the openshift-infra namespace. The role name must begin with saRolePrefix
// and must not have been registered before; violations are programmer errors
// and abort the process via glog.Fatalf.
func addControllerRole(role rbac.ClusterRole) {
	if !strings.HasPrefix(role.Name, saRolePrefix) {
		glog.Fatalf(`role %q must start with %q`, role.Name, saRolePrefix)
	}

	for i := range controllerRoles {
		if controllerRoles[i].Name == role.Name {
			glog.Fatalf("role %q was already registered", role.Name)
		}
	}

	// Mark the role as system-only so it is protected from modification.
	annotations := role.Annotations
	if annotations == nil {
		annotations = map[string]string{}
		role.Annotations = annotations
	}
	annotations[roleSystemOnly] = roleIsSystemOnly

	controllerRoles = append(controllerRoles, role)

	// The service account name is the role name with the prefix stripped.
	saName := role.Name[len(saRolePrefix):]
	binding := rbac.NewClusterBinding(role.Name).SAs(DefaultOpenShiftInfraNamespace, saName).BindingOrDie()
	controllerRoleBindings = append(controllerRoleBindings, binding)
}

// eventsRule returns the PolicyRule shared by all controller roles for
// recording events. In addition to "create", it grants "update" and "patch"
// because event compaction is still performed client-side, which modifies
// existing event objects (confirmed in the review discussion below).
func eventsRule() rbac.PolicyRule {
return rbac.NewRule("create", "update", "patch").Groups(kapiGroup).Resources("events").RuleOrDie()
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Did you find out why we have update and patch here? Is it event compaction?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@deads2k @sttts ? no clue, but we have that in other controllers...

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@deads2k made a func for it so it must be useful ;-)

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Did you find out why we have update and patch here? Is it event compaction?

Yes, it's still done client-side after all this time.

}

// init registers the bootstrap cluster roles (and, through addControllerRole,
// the matching cluster role bindings) for the openshift controllers at
// package load time.
func init() {
// build-controller: manages builds (get/list/watch/update/delete), reads
// buildconfigs and imagestreams, creates the per-strategy build
// subresources, and manages the pods that run builds.
addControllerRole(rbac.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + InfraBuildControllerServiceAccountName},
Rules: []rbac.PolicyRule{
rbac.NewRule("get", "list", "watch", "update", "delete").Groups(buildGroup, legacyBuildGroup).Resources("builds").RuleOrDie(),
rbac.NewRule("get").Groups(buildGroup, legacyBuildGroup).Resources("buildconfigs").RuleOrDie(),
// Create on the virtual per-strategy build resources allows builds of
// those types to be updated.
rbac.NewRule("create").Groups(buildGroup, legacyBuildGroup).Resources("builds/docker", "builds/source", "builds/custom", "builds/jenkinspipeline").RuleOrDie(),
rbac.NewRule("get").Groups(imageGroup, legacyImageGroup).Resources("imagestreams").RuleOrDie(),
rbac.NewRule("get", "list", "create", "delete").Groups(kapiGroup).Resources("pods").RuleOrDie(),
rbac.NewRule("get").Groups(kapiGroup).Resources("namespaces").RuleOrDie(),
eventsRule(),
},
})

// deployer-controller: manages deployer pods and updates the
// replicationcontrollers they deploy.
addControllerRole(rbac.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + InfraDeployerControllerServiceAccountName},
Rules: []rbac.PolicyRule{
rbac.NewRule("create", "get", "list", "watch", "patch", "delete").Groups(kapiGroup).Resources("pods").RuleOrDie(),
rbac.NewRule("get", "list", "watch", "update").Groups(kapiGroup).Resources("replicationcontrollers").RuleOrDie(),
eventsRule(),
},
})

// deploymentconfig-controller: manages replicationcontrollers for
// deploymentconfigs and updates deploymentconfig status.
addControllerRole(rbac.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + InfraDeploymentConfigControllerServiceAccountName},
Rules: []rbac.PolicyRule{
// NOTE(review): per the thread below, this pods rule may be removable
// once PR #14046 merges — confirm before tightening.
rbac.NewRule("get", "list", "watch").Groups(kapiGroup).Resources("pods").RuleOrDie(),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We can remove this rule and anything related to pods in the DC controller after #14046 is merged.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

sure, I will update this PR.

rbac.NewRule("create", "get", "list", "watch", "update", "delete").Groups(kapiGroup).Resources("replicationcontrollers").RuleOrDie(),
rbac.NewRule("update").Groups(deployGroup, legacyDeployGroup).Resources("deploymentconfigs/status").RuleOrDie(),
rbac.NewRule("get", "list", "watch").Groups(deployGroup, legacyDeployGroup).Resources("deploymentconfigs").RuleOrDie(),
eventsRule(),
},
})

// deployment-trigger-controller: reads RCs, deploymentconfigs, and
// imagestreams, and instantiates deploymentconfigs when triggers fire.
addControllerRole(rbac.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + InfraDeploymentTriggerControllerServiceAccountName},
Rules: []rbac.PolicyRule{
// NOTE(review): list/watch are broader than the controller strictly
// needs (it only GETs RCs), but are kept for the shared informer —
// see the discussion below.
rbac.NewRule("get", "list", "watch").Groups(kapiGroup).Resources("replicationcontrollers").RuleOrDie(),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We shouldn't need to list or watch RCs here, the only thing this controller needs is get.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

kcorelistersinternal.NewReplicationControllerLister(rcInformer.GetIndexer())

will this work only with get?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

List and watch are used by the informer but we have no use for the informer in the controller. We just GET rcs and compare their templates w/ the DC template.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I guess since we use the shared informer, we are bound to have list and watch for this controller?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@Kargakis that is what I think @deads2k ?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

iow. when we run this as a standalone process in a pod, we will need these permissions to get the shared informer?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

iow. when we run this as a standalone process in a pod, we will need these permissions to get the shared informer?

Yes. and I acknowledge that such a separation may be a long way off.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hard choice, both options have pros and cons.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

well given that this controller was on privileged loopback client before, I would leave this as it is for now?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, it's fine

rbac.NewRule("get", "list", "watch").Groups(deployGroup, legacyDeployGroup).Resources("deploymentconfigs").RuleOrDie(),
rbac.NewRule("get", "list", "watch").Groups(imageGroup, legacyImageGroup).Resources("imagestreams").RuleOrDie(),

rbac.NewRule("create").Groups(deployGroup, legacyDeployGroup).Resources("deploymentconfigs/instantiate").RuleOrDie(),
eventsRule(),
},
})
}

// ControllerRoles returns the bootstrap cluster roles that the openshift
// controller service accounts are granted.
func ControllerRoles() []rbac.ClusterRole {
	roles := controllerRoles
	return roles
}

// ControllerRoleBindings returns the bootstrap cluster role bindings that
// bind the controller roles to their service accounts.
func ControllerRoleBindings() []rbac.ClusterRoleBinding {
	bindings := controllerRoleBindings
	return bindings
}
6 changes: 6 additions & 0 deletions pkg/cmd/server/bootstrappolicy/dead.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,4 +46,10 @@ func init() {
addDeadClusterRole("system:gc-controller")
addDeadClusterRole("system:certificate-signing-controller")
addDeadClusterRole("system:statefulset-controller")

// these were moved under system:openshift:controller:*
addDeadClusterRole("system:build-controller")
addDeadClusterRole("system:deploymentconfig-controller")
addDeadClusterRole("system:deployment-controller")

}
146 changes: 11 additions & 135 deletions pkg/cmd/server/bootstrappolicy/infra_sa_policy.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,20 +17,18 @@ import (
buildapi "github.com/openshift/origin/pkg/build/api"
deployapi "github.com/openshift/origin/pkg/deploy/api"
imageapi "github.com/openshift/origin/pkg/image/api"

// we need the conversions registered for our init block
_ "github.com/openshift/origin/pkg/authorization/api/install"
)

const (
InfraBuildControllerServiceAccountName = "build-controller"
BuildControllerRoleName = "system:build-controller"

InfraImageTriggerControllerServiceAccountName = "imagetrigger-controller"
ImageTriggerControllerRoleName = "system:imagetrigger-controller"

InfraDeploymentConfigControllerServiceAccountName = "deploymentconfig-controller"
DeploymentConfigControllerRoleName = "system:deploymentconfig-controller"

InfraDeploymentControllerServiceAccountName = "deployment-controller"
DeploymentControllerRoleName = "system:deployment-controller"
InfraBuildControllerServiceAccountName = "build-controller"
InfraImageTriggerControllerServiceAccountName = "imagetrigger-controller"
ImageTriggerControllerRoleName = "system:imagetrigger-controller"
InfraDeploymentConfigControllerServiceAccountName = "deploymentconfig-controller"
InfraDeploymentTriggerControllerServiceAccountName = "deployment-trigger-controller"
InfraDeployerControllerServiceAccountName = "deployer-controller"

InfraPersistentVolumeBinderControllerServiceAccountName = "pv-binder-controller"
PersistentVolumeBinderControllerRoleName = "system:pv-binder-controller"
Expand Down Expand Up @@ -127,58 +125,11 @@ func (r *InfraServiceAccounts) AllRoles() []authorizationapi.ClusterRole {
}

func init() {
var err error

InfraSAs.serviceAccounts = sets.String{}
InfraSAs.saToRole = map[string]authorizationapi.ClusterRole{}

var err error
err = InfraSAs.addServiceAccount(
InfraBuildControllerServiceAccountName,
authorizationapi.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: BuildControllerRoleName,
},
Rules: []authorizationapi.PolicyRule{
// BuildControllerFactory.buildLW
// BuildControllerFactory.buildDeleteLW
{
Verbs: sets.NewString("get", "list", "watch"),
Resources: sets.NewString("builds"),
},
// BuildController.BuildUpdater (OSClientBuildClient)
{
Verbs: sets.NewString("update"),
Resources: sets.NewString("builds"),
},
// Create permission on virtual build type resources allows builds of those types to be updated
{
Verbs: sets.NewString("create"),
Resources: sets.NewString("builds/docker", "builds/source", "builds/custom", "builds/jenkinspipeline"),
APIGroups: []string{buildapi.GroupName, buildapi.LegacyGroupName},
},
// BuildController.ImageStreamClient (ControllerClient)
{
Verbs: sets.NewString("get"),
Resources: sets.NewString("imagestreams"),
},
// BuildController.PodManager (ControllerClient)
// BuildDeleteController.PodManager (ControllerClient)
// BuildControllerFactory.buildDeleteLW
{
Verbs: sets.NewString("get", "list", "create", "delete"),
Resources: sets.NewString("pods"),
},
// BuildController.Recorder (EventBroadcaster)
{
Verbs: sets.NewString("create", "update", "patch"),
Resources: sets.NewString("events"),
},
},
},
)
if err != nil {
panic(err)
}

err = InfraSAs.addServiceAccount(
InfraImageTriggerControllerServiceAccountName,
authorizationapi.ClusterRole{
Expand Down Expand Up @@ -230,81 +181,6 @@ func init() {
panic(err)
}

err = InfraSAs.addServiceAccount(
InfraDeploymentConfigControllerServiceAccountName,
authorizationapi.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: DeploymentConfigControllerRoleName,
},
Rules: []authorizationapi.PolicyRule{
// DeploymentControllerFactory.deploymentLW
{
Verbs: sets.NewString("list", "watch"),
Resources: sets.NewString("replicationcontrollers"),
},
// DeploymentControllerFactory.deploymentClient
{
Verbs: sets.NewString("get", "update"),
Resources: sets.NewString("replicationcontrollers"),
},
// DeploymentController.podClient
{
Verbs: sets.NewString("get", "list", "create", "watch", "delete", "update"),
Resources: sets.NewString("pods"),
},
// DeploymentController.recorder (EventBroadcaster)
{
Verbs: sets.NewString("create", "update", "patch"),
Resources: sets.NewString("events"),
},
},
},
)
if err != nil {
panic(err)
}

err = InfraSAs.addServiceAccount(
InfraDeploymentControllerServiceAccountName,
authorizationapi.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: DeploymentControllerRoleName,
},
Rules: []authorizationapi.PolicyRule{
{
APIGroups: []string{extensions.GroupName},
Verbs: sets.NewString("get", "list", "watch", "update"),
Resources: sets.NewString("deployments"),
},
{
APIGroups: []string{extensions.GroupName},
Verbs: sets.NewString("update"),
Resources: sets.NewString("deployments/status"),
},
{
APIGroups: []string{extensions.GroupName},
Verbs: sets.NewString("list", "watch", "get", "create", "patch", "update", "delete"),
Resources: sets.NewString("replicasets"),
},
{
APIGroups: []string{""},
// TODO: remove "update" once
// https://github.com/kubernetes/kubernetes/issues/36897 is resolved.
Verbs: sets.NewString("get", "list", "watch", "update"),
Resources: sets.NewString("pods"),
},
{
APIGroups: []string{""},
Verbs: sets.NewString("create", "update", "patch"),
Resources: sets.NewString("events"),
},
},
},
)
if err != nil {
panic(err)
}

err = InfraSAs.addServiceAccount(
InfraPersistentVolumeRecyclerControllerServiceAccountName,
authorizationapi.ClusterRole{
Expand Down
24 changes: 22 additions & 2 deletions pkg/cmd/server/bootstrappolicy/policy.go
Original file line number Diff line number Diff line change
Expand Up @@ -959,6 +959,11 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
if err != nil {
panic(err)
}
openshiftControllerRoles, err := GetOpenshiftControllerBootstrapClusterRoles()
// coder error
if err != nil {
panic(err)
}

// Eventually openshift controllers and kube controllers have different prefixes
// so we will only need to check conflicts on the "normal" cluster roles
Expand Down Expand Up @@ -990,6 +995,7 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
finalClusterRoles := []authorizationapi.ClusterRole{}
finalClusterRoles = append(finalClusterRoles, openshiftClusterRoles...)
finalClusterRoles = append(finalClusterRoles, openshiftSAClusterRoles...)
finalClusterRoles = append(finalClusterRoles, openshiftControllerRoles...)
finalClusterRoles = append(finalClusterRoles, kubeSAClusterRoles...)
for i := range kubeClusterRoles {
if !clusterRoleConflicts.Has(kubeClusterRoles[i].Name) {
Expand Down Expand Up @@ -1189,7 +1195,12 @@ func GetBootstrapClusterRoleBindings() []authorizationapi.ClusterRoleBinding {
if err != nil {
panic(err)
}
kubeSAClusterRoleBindings, err := GetKubeControllerBootstrapClusterRoleBindings()
kubeControllerClusterRoleBindings, err := GetKubeControllerBootstrapClusterRoleBindings()
// coder error
if err != nil {
panic(err)
}
openshiftControllerClusterRoleBindings, err := GetOpenshiftControllerBootstrapClusterRoleBindings()
// coder error
if err != nil {
panic(err)
Expand Down Expand Up @@ -1220,7 +1231,8 @@ func GetBootstrapClusterRoleBindings() []authorizationapi.ClusterRoleBinding {

finalClusterRoleBindings := []authorizationapi.ClusterRoleBinding{}
finalClusterRoleBindings = append(finalClusterRoleBindings, openshiftClusterRoleBindings...)
finalClusterRoleBindings = append(finalClusterRoleBindings, kubeSAClusterRoleBindings...)
finalClusterRoleBindings = append(finalClusterRoleBindings, kubeControllerClusterRoleBindings...)
finalClusterRoleBindings = append(finalClusterRoleBindings, openshiftControllerClusterRoleBindings...)
for i := range kubeClusterRoleBindings {
if !clusterRoleBindingConflicts.Has(kubeClusterRoleBindings[i].Name) {
finalClusterRoleBindings = append(finalClusterRoleBindings, kubeClusterRoleBindings[i])
Expand Down Expand Up @@ -1263,6 +1275,10 @@ func GetKubeControllerBootstrapClusterRoleBindings() ([]authorizationapi.Cluster
return convertClusterRoleBindings(bootstrappolicy.ControllerRoleBindings())
}

// GetOpenshiftControllerBootstrapClusterRoleBindings returns the openshift
// controller role bindings converted into origin's authorization API type.
func GetOpenshiftControllerBootstrapClusterRoleBindings() ([]authorizationapi.ClusterRoleBinding, error) {
	rbacBindings := ControllerRoleBindings()
	return convertClusterRoleBindings(rbacBindings)
}

func convertClusterRoleBindings(in []rbac.ClusterRoleBinding) ([]authorizationapi.ClusterRoleBinding, error) {
out := []authorizationapi.ClusterRoleBinding{}
errs := []error{}
Expand All @@ -1287,6 +1303,10 @@ func GetKubeControllerBootstrapClusterRoles() ([]authorizationapi.ClusterRole, e
return convertClusterRoles(bootstrappolicy.ControllerRoles())
}

// GetOpenshiftControllerBootstrapClusterRoles returns the openshift
// controller cluster roles converted into origin's authorization API type.
func GetOpenshiftControllerBootstrapClusterRoles() ([]authorizationapi.ClusterRole, error) {
	rbacRoles := ControllerRoles()
	return convertClusterRoles(rbacRoles)
}

func convertClusterRoles(in []rbac.ClusterRole) ([]authorizationapi.ClusterRole, error) {
out := []authorizationapi.ClusterRole{}
errs := []error{}
Expand Down
Loading