Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Issue #148: introduce cluster-scoped-rbac #149

Merged
merged 8 commits into from
Mar 7, 2023
244 changes: 244 additions & 0 deletions cmd/export/cluster.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,244 @@
package export

import (
"fmt"
"strings"

authv1 "github.com/openshift/api/authorization/v1"
securityv1 "github.com/openshift/api/security/v1"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
)

// ClusterScopeHandler drives the optional export of cluster-scoped RBAC
// resources alongside the namespaced ones.
type ClusterScopeHandler struct {
}

// admittedResource identifies a cluster-scoped resource kind that is allowed
// to be exported.
type admittedResource struct {
	APIgroup string
	Kind     string
}

// admittedClusterScopeResources is the closed list of cluster-scoped kinds
// the export is willing to collect.
var admittedClusterScopeResources = []admittedResource{
	{Kind: "ClusterRoleBinding", APIgroup: "rbac.authorization.k8s.io"},
	{Kind: "ClusterRole", APIgroup: "rbac.authorization.k8s.io"},
	{Kind: "SecurityContextConstraints", APIgroup: "security.openshift.io"},
}

// NewClusterScopeHandler returns a ready-to-use ClusterScopeHandler.
func NewClusterScopeHandler() *ClusterScopeHandler {
	return &ClusterScopeHandler{}
}

// isClusterScopedResource reports whether the apiGroup/kind pair is one of
// the admitted cluster-scoped resource kinds.
func isClusterScopedResource(apiGroup string, kind string) bool {
	candidate := admittedResource{APIgroup: apiGroup, Kind: kind}
	for _, admitted := range admittedClusterScopeResources {
		if admitted == candidate {
			return true
		}
	}
	return false
}

// filterRbacResources splits the discovered resources into namespaced
// resources (passed through untouched) and admitted cluster-scoped RBAC
// resources, which are filtered so that only the instances relevant to the
// exported ServiceAccounts are re-appended to the result.
func (c *ClusterScopeHandler) filterRbacResources(resources []*groupResource, log logrus.FieldLogger) []*groupResource {
	log.Debug("Looking for ServiceAccount resources")

	handler := NewClusterScopedRbacHandler(log)
	var filteredResources []*groupResource
	for _, r := range resources {
		kind := r.APIResource.Kind
		// Collect every exported ServiceAccount: the cluster-scoped matching
		// below is driven by them.
		if kind == "ServiceAccount" {
			for _, obj := range r.objects.Items {
				log.Debugf("Adding ServiceAccount %s in namespace %s", obj.GetName(), obj.GetNamespace())
				handler.serviceAccounts = append(handler.serviceAccounts, obj)
			}
		}
		if isClusterScopedResource(r.APIGroup, kind) {
			// Set aside for filtering; re-added after the matching pass.
			log.Debugf("Adding %d Cluster resource of type %s", len(r.objects.Items), kind)
			handler.clusterResources[kind] = r
		} else {
			filteredResources = append(filteredResources, r)
		}
	}

	// Re-append only the cluster-scoped resources that survived filtering
	// (i.e. those matching an exported ServiceAccount, directly or indirectly).
	for _, k := range admittedClusterScopeResources {
		filtered, ok := handler.filteredResourcesOfKind(k)
		if ok && len(filtered.objects.Items) > 0 {
			filteredResources = append(filteredResources, filtered)
		}
	}

	return filteredResources
}

// ClusterScopedRbacHandler holds the state needed to decide which
// cluster-scoped RBAC resources are relevant to the exported namespace.
type ClusterScopedRbacHandler struct {
	log logrus.FieldLogger
	// readyToFilter is set once prepareForFiltering has run.
	readyToFilter bool
	// serviceAccounts collects every exported ServiceAccount; all matching is
	// ultimately based on them.
	serviceAccounts []unstructured.Unstructured
	// clusterResources maps resource kind to the discovered cluster-scoped
	// resources of that kind.
	clusterResources map[string]*groupResource

	// filteredClusterRoleBindings caches the ClusterRoleBindings that reference
	// an exported ServiceAccount. NOTE(review): stays nil when no
	// ClusterRoleBinding resource was discovered — consumers must cope with that.
	filteredClusterRoleBindings *groupResource
}

// NewClusterScopedRbacHandler returns a handler bound to the given logger,
// with an initialized (empty) cluster-resource map and filtering not yet
// prepared.
func NewClusterScopedRbacHandler(log logrus.FieldLogger) *ClusterScopedRbacHandler {
	return &ClusterScopedRbacHandler{
		log:              log,
		readyToFilter:    false,
		clusterResources: map[string]*groupResource{},
	}
}

// prepareForFiltering computes the set of ClusterRoleBindings that reference
// one of the exported ServiceAccounts. It must run before any per-kind
// filtering, because ClusterRole and SCC acceptance is derived from the
// matched bindings. The readyToFilter flag makes the computation one-shot.
func (c *ClusterScopedRbacHandler) prepareForFiltering() {
	c.readyToFilter = true
	c.log.Infof("Preparing matching ClusterRoleBindings")

	clusterRoleBindings, ok := c.clusterResources["ClusterRoleBinding"]
	if ok {
		// Clone the group/version metadata; objects are attached below after
		// the acceptance pass.
		c.filteredClusterRoleBindings = &groupResource{
			APIGroup:        clusterRoleBindings.APIGroup,
			APIVersion:      clusterRoleBindings.APIVersion,
			APIGroupVersion: clusterRoleBindings.APIGroupVersion,
			APIResource:     clusterRoleBindings.APIResource,
		}
		filteredClusterRoleBindings := unstructured.UnstructuredList{Items: []unstructured.Unstructured{}}
		for _, crb := range clusterRoleBindings.objects.Items {
			if c.acceptClusterRoleBinding(crb) {
				filteredClusterRoleBindings.Items = append(filteredClusterRoleBindings.Items, crb)
				c.log.Infof("Found matching %s %s", crb.GetKind(), crb.GetName())
			}
		}
		c.filteredClusterRoleBindings.objects = &filteredClusterRoleBindings
	}
	// NOTE(review): when no ClusterRoleBinding resource was discovered,
	// c.filteredClusterRoleBindings remains nil.
}

func (c *ClusterScopedRbacHandler) filteredResourcesOfKind(resource admittedResource) (*groupResource, bool) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This filter logic, to me, seems more appropriate for the transform step.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hey @djzager, I perfectly agree with you that this seems to be a transform step, and you almost anticipated some of the reasons why I haven't followed this path (e.g., complexity and more repos impacted), but at the moment this can't be done without adding any context related to the other exported resources, because the PluginRun interface works only on individual resources, but the decision whether to accept or whiteout an RBAC resource depends also on other resources (e.g., is there any ServiceAccount matching this ClusterRoleBinding?). In brief, we should pass some kind of "context" to the plugin execution which, in turn, for each and every RBAC resource, should unmarshal all the files in all the exported namespaces to look for these matches.
I'm totally worried by performance, as you can imagine, this is why the initial implementation, even if it broke the usual 3-steps approach, looked more promising.
WDYT?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fair point. I believe the right answer for us in this scenario would be to articulate why we can't put this in transform plugin(s) via GitHub issue and proceed with this PR as it is currently implemented.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I am plus one, a follow up issue would be great

if !c.readyToFilter {
c.prepareForFiltering()
}

kind := resource.Kind
clusterGroupResource, ok := c.clusterResources[kind]
if ok {
if kind == "ClusterRoleBinding" {
return c.filteredClusterRoleBindings, true
}

filtered := make([]unstructured.Unstructured, 0)
initialLen := len(clusterGroupResource.objects.Items)
c.log.Infof("Filtering for kind %s (%d found)", kind, initialLen)
for _, r := range clusterGroupResource.objects.Items {
if c.accept(kind, r) {
filtered = append(filtered, r)
}
}
clusterGroupResource.objects.Items = filtered
c.log.Infof("After filtering %d remained out of %d", len(clusterGroupResource.objects.Items), initialLen)
}
return clusterGroupResource, ok
}

// accept reports whether the given cluster-scoped resource must be kept in
// the export. ClusterRoleBindings are not handled here (they are pre-filtered
// by prepareForFiltering), so any unrecognized kind falls through to false.
func (c *ClusterScopedRbacHandler) accept(kind string, clusterResource unstructured.Unstructured) bool {
	c.log.Debugf("Checking acceptance for %s of kind %s", clusterResource.GetName(), kind)
	switch clusterResource.GroupVersionKind().Kind {
	case "ClusterRole":
		return c.acceptClusterRole(clusterResource)
	case "SecurityContextConstraints":
		return c.acceptSecurityContextConstraints(clusterResource)
	default:
		return false
	}
}

// acceptClusterRoleBinding reports whether the given ClusterRoleBinding has
// at least one ServiceAccount subject that matches an exported ServiceAccount.
func (c *ClusterScopedRbacHandler) acceptClusterRoleBinding(clusterResource unstructured.Unstructured) bool {
	var crb authv1.ClusterRoleBinding
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(clusterResource.Object, &crb); err != nil {
		c.log.Warnf("Cannot convert to authv1.ClusterRoleBinding: %s", err)
		return false
	}
	for _, subject := range crb.Subjects {
		if subject.Kind != "ServiceAccount" {
			continue
		}
		if c.anyServiceAccountInNamespace(subject.Namespace, subject.Name) {
			c.log.Infof("Accepted %s of kind %s", clusterResource.GetName(), clusterResource.GetKind())
			return true
		}
	}
	return false
}

// acceptClusterRole reports whether the given ClusterRole is referenced
// (via roleRef) by one of the already-matched ClusterRoleBindings.
func (c *ClusterScopedRbacHandler) acceptClusterRole(clusterResource unstructured.Unstructured) bool {
	var cr authv1.ClusterRole
	err := runtime.DefaultUnstructuredConverter.
		FromUnstructured(clusterResource.Object, &cr)
	if err != nil {
		c.log.Warnf("Cannot convert to authv1.ClusterRole: %s", err)
		return false
	}
	// Bug fix: filteredClusterRoleBindings stays nil when no ClusterRoleBinding
	// resource was discovered (see prepareForFiltering); dereferencing it
	// unconditionally panics. With no bindings, no ClusterRole can match.
	if c.filteredClusterRoleBindings == nil || c.filteredClusterRoleBindings.objects == nil {
		return false
	}
	for _, f := range c.filteredClusterRoleBindings.objects.Items {
		var crb authv1.ClusterRoleBinding
		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(f.Object, &crb); err != nil {
			c.log.Warnf("Cannot convert to authv1.ClusterRoleBinding: %s", err)
			continue
		}
		if crb.RoleRef.Kind == "ClusterRole" && crb.RoleRef.Name == cr.Name {
			c.log.Infof("Accepted %s of kind %s", clusterResource.GetName(), clusterResource.GetKind())
			return true
		}
	}
	return false
}

// acceptSecurityContextConstraints reports whether the given SCC is relevant
// for the exported namespace. An SCC is accepted when:
//   - a matched ClusterRoleBinding references it directly by roleRef, or
//   - a matched ClusterRoleBinding references the conventional
//     "system:openshift:scc:<name>" ClusterRole, or
//   - its users list contains one of the exported ServiceAccounts.
func (c *ClusterScopedRbacHandler) acceptSecurityContextConstraints(clusterResource unstructured.Unstructured) bool {
	var scc securityv1.SecurityContextConstraints
	err := runtime.DefaultUnstructuredConverter.
		FromUnstructured(clusterResource.Object, &scc)
	if err != nil {
		c.log.Warnf("Cannot convert to securityv1.SecurityContextConstraints: %s", err)
		return false
	}

	// Bug fix: filteredClusterRoleBindings stays nil when no ClusterRoleBinding
	// resource was discovered (see prepareForFiltering); guard before ranging
	// to avoid a nil-pointer dereference. The users check below still applies.
	if c.filteredClusterRoleBindings != nil && c.filteredClusterRoleBindings.objects != nil {
		for _, f := range c.filteredClusterRoleBindings.objects.Items {
			var crb authv1.ClusterRoleBinding
			err := runtime.DefaultUnstructuredConverter.
				FromUnstructured(f.Object, &crb)
			if err != nil {
				c.log.Warnf("Cannot convert to authv1.ClusterRoleBinding: %s", err)
				continue
			}

			if crb.RoleRef.Kind == "SecurityContextConstraints" && crb.RoleRef.Name == scc.Name {
				c.log.Infof("Accepted %s of kind %s", clusterResource.GetName(), clusterResource.GetKind())
				return true
			}
			// SCC grants are commonly modelled through a generated ClusterRole
			// named "system:openshift:scc:<scc-name>".
			sccSystemName := fmt.Sprintf("system:openshift:scc:%s", clusterResource.GetName())
			if crb.RoleRef.Kind == "ClusterRole" && crb.RoleRef.Name == sccSystemName {
				c.log.Infof("Accepted %s of kind %s (match via ClusterRoleBinding %s)",
					clusterResource.GetName(), clusterResource.GetKind(), crb.Name)
				return true
			}
		}
	}

	// Last option, look at the users field if it contains one of the exported
	// serviceaccounts ("system:serviceaccount:<namespace>:<name>").
	for _, u := range scc.Users {
		// Split once instead of three times; the Contains check is kept as-is
		// so the accepted inputs are unchanged.
		parts := strings.Split(u, ":")
		if strings.Contains(u, "system:serviceaccount:") && len(parts) == 4 {
			if c.anyServiceAccountInNamespace(parts[2], parts[3]) {
				// Typo fix in the log message: "wia" -> "via".
				c.log.Infof("Accepted %s of kind %s (match via user %s)",
					clusterResource.GetName(), clusterResource.GetKind(), u)
				return true
			}
		}
	}

	return false
}

// anyServiceAccountInNamespace reports whether one of the collected exported
// ServiceAccounts has the given name and namespace.
func (c *ClusterScopedRbacHandler) anyServiceAccountInNamespace(namespaceName string, serviceAccountName string) bool {
	c.log.Debugf("Looking for SA %s in %s", serviceAccountName, namespaceName)
	for i := range c.serviceAccounts {
		sa := &c.serviceAccounts[i]
		if sa.GetNamespace() == namespaceName && sa.GetName() == serviceAccountName {
			return true
		}
	}
	return false
}
31 changes: 21 additions & 10 deletions cmd/export/discover.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ type groupResourceError struct {
Error error `json:"error"`
}

func writeResources(resources []*groupResource, resourceDir string, log logrus.FieldLogger) []error {
func writeResources(resources []*groupResource, clusterResourceDir string, resourceDir string, log logrus.FieldLogger) []error {
errs := []error{}
for _, r := range resources {
log.Infof("Writing objects of resource: %s to the output directory\n", r.APIResource.Name)
Expand All @@ -45,7 +45,11 @@ func writeResources(resources []*groupResource, resourceDir string, log logrus.F
}

for _, obj := range r.objects.Items {
path := filepath.Join(resourceDir, getFilePath(obj))
targetDir := resourceDir
if obj.GetNamespace() == "" {
targetDir = clusterResourceDir
}
path := filepath.Join(targetDir, getFilePath(obj))
f, err := os.Create(path)
if err != nil {
errs = append(errs, err)
Expand Down Expand Up @@ -124,7 +128,7 @@ func getFilePath(obj unstructured.Unstructured) string {
return strings.Join([]string{obj.GetKind(), obj.GetObjectKind().GroupVersionKind().GroupKind().Group, obj.GetObjectKind().GroupVersionKind().Version, namespace, obj.GetName()}, "_") + ".yaml"
}

func resourceToExtract(namespace string, labelSelector string, dynamicClient dynamic.Interface, lists []*metav1.APIResourceList, apiGroups []metav1.APIGroup, log logrus.FieldLogger) ([]*groupResource, []*groupResourceError) {
func resourceToExtract(namespace string, labelSelector string, clusterScopedRbac bool, dynamicClient dynamic.Interface, lists []*metav1.APIResourceList, apiGroups []metav1.APIGroup, log logrus.FieldLogger) ([]*groupResource, []*groupResourceError) {
resources := []*groupResource{}
errors := []*groupResourceError{}

Expand All @@ -147,8 +151,8 @@ func resourceToExtract(namespace string, labelSelector string, dynamicClient dyn
continue
}

if !resource.Namespaced {
log.Debugf("resource: %s.%s is clusterscoped, skipping\n", gv.String(), resource.Kind)
if !isAdmittedResource(clusterScopedRbac, gv, resource) {
log.Debugf("resource: %s.%s is clusterscoped or not admitted kind, skipping\n", gv.String(), resource.Kind)
continue
}

Expand Down Expand Up @@ -201,18 +205,25 @@ func resourceToExtract(namespace string, labelSelector string, dynamicClient dyn
return resources, errors
}

// isAdmittedResource reports whether a discovered API resource should be
// exported: namespaced resources always are; cluster-scoped ones only when the
// cluster-scoped-rbac option is enabled and the kind is in the admitted list.
func isAdmittedResource(clusterScopedRbac bool, gv schema.GroupVersion, resource metav1.APIResource) bool {
	if resource.Namespaced {
		return true
	}
	return clusterScopedRbac && isClusterScopedResource(gv.Group, resource.Kind)
}

func getObjects(g *groupResource, namespace string, labelSelector string, d dynamic.Interface, logger logrus.FieldLogger) (*unstructured.UnstructuredList, error) {
c := d.Resource(schema.GroupVersionResource{
Group: g.APIGroup,
Version: g.APIVersion,
Resource: g.APIResource.Name,
})
if !g.APIResource.Namespaced {
return &unstructured.UnstructuredList{}, nil
}

p := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
return c.Namespace(namespace).List(context.Background(), opts)
if g.APIResource.Namespaced {
return c.Namespace(namespace).List(context.Background(), opts)
} else {
return c.List(context.Background(), opts)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wonder if you have a set of resources that you want it might be worth filtering the cluster scoped calls to just those resources

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You mean adding ListOptions so that, for instance, when we collect the ServiceAccounts we also collect the meaningful ClusterRoleBinding and ClusterRole by using FieldSelector queries to fetch only the relevant instances?
If this is the purpose of the comment, it would require more changes to the discovery module because ATM if fetches only resources of a given, single kind. If we return resources of multiple kinds, instead, the code using getObjects function would need more changes.
Please let me know what was your actual goal.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

FYI, not all the fields are eligible to be used in the ListOptions.FieldSelector, e.g. the following command:
oc get clusterrolebindings.rbac.authorization.k8s.io --field-selector roleRef.name=AAA
would actually return an error because of that:
Error from server (BadRequest): Unable to find "rbac.authorization.k8s.io/v1, Resource=clusterrolebindings" that match label selector "", field selector "roleRef.name=AAA": "roleRef.name" is not a known field selector: only "metadata.name", "metadata.namespace"

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think what I was asking, is that we look at the kind, and only retrieve the kinds of RBAC and SCC rather than filter here.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

when it reaches this point in the getObjects function, line 226 can only collect the 3 modelled kinds of cluster-scoped RBAC resources, all other kinds were discarded before invoking getObjects

}
})
listOptions := metav1.ListOptions{}
if labelSelector != "" {
Expand Down
25 changes: 21 additions & 4 deletions cmd/export/export.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ type ExportOptions struct {
exportDir string
labelSelector string
userSpecifiedNamespace string
clusterScopedRbac bool
asExtras string
extras map[string][]string
QPS float32
Expand Down Expand Up @@ -80,14 +81,25 @@ func (o *ExportOptions) Run() error {
log := o.globalFlags.GetLogger()

// create export directory if it doesnt exist
err = os.MkdirAll(filepath.Join(o.exportDir, "resources", o.userSpecifiedNamespace), 0700)
resourceDir := filepath.Join(o.exportDir, "resources", o.userSpecifiedNamespace)
err = os.MkdirAll(resourceDir, 0700)
switch {
case os.IsExist(err):
case err != nil:
log.Errorf("error creating the resources directory: %#v", err)
return err
}

// create _cluster directory if it doesn't exist
clusterResourceDir := filepath.Join(o.exportDir, "resources", o.userSpecifiedNamespace, "_cluster")
if o.clusterScopedRbac {
err = os.MkdirAll(clusterResourceDir, 0700)
switch {
case os.IsExist(err):
case err != nil:
log.Errorf("error creating the cluster resources directory: %#v", err)
return err
}
}
// create failures directory if it doesn't exist
err = os.MkdirAll(filepath.Join(o.exportDir, "failures", o.userSpecifiedNamespace), 0700)
switch {
Expand Down Expand Up @@ -130,10 +142,14 @@ func (o *ExportOptions) Run() error {

var errs []error

resources, resourceErrs := resourceToExtract(o.userSpecifiedNamespace, o.labelSelector, dynamicClient, discoveryHelper.Resources(), discoveryHelper.APIGroups(), log)
resources, resourceErrs := resourceToExtract(o.userSpecifiedNamespace, o.labelSelector, o.clusterScopedRbac, dynamicClient, discoveryHelper.Resources(), discoveryHelper.APIGroups(), log)
clusterScopeHandler := NewClusterScopeHandler()
if o.clusterScopedRbac {
resources = clusterScopeHandler.filterRbacResources(resources, log)
}

log.Debugf("attempting to write resources to files\n")
writeResourcesErrors := writeResources(resources, filepath.Join(o.exportDir, "resources", o.userSpecifiedNamespace), log)
writeResourcesErrors := writeResources(resources, clusterResourceDir, resourceDir, log)
for _, e := range writeResourcesErrors {
log.Warnf("error writing manifests to file: %#v, ignoring\n", e)
}
Expand Down Expand Up @@ -182,6 +198,7 @@ func NewExportCommand(streams genericclioptions.IOStreams, f *flags.GlobalFlags)

cmd.Flags().StringVarP(&o.exportDir, "export-dir", "e", "export", "The path where files are to be exported")
cmd.Flags().StringVarP(&o.labelSelector, "label-selector", "l", "", "Restrict export to resources matching a label selector")
cmd.Flags().BoolVarP(&o.clusterScopedRbac, "cluster-scoped-rbac", "c", false, "Include cluster-scoped RBAC resources")
cmd.Flags().StringVar(&o.asExtras, "as-extras", "", "The extra info for impersonation can only be used with User or Group but is not required. An example is --as-extras key=string1,string2;key2=string3")
cmd.Flags().Float32VarP(&o.QPS, "qps", "q", 100, "Query Per Second Rate.")
cmd.Flags().IntVarP(&o.Burst, "burst", "b", 1000, "API Burst Rate.")
Expand Down
4 changes: 2 additions & 2 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ require (
github.com/spf13/cobra v1.5.0
github.com/spf13/viper v1.12.0
github.com/vmware-tanzu/velero v1.6.3
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3
golang.org/x/mod v0.6.0
gotest.tools/v3 v3.0.3
k8s.io/api v0.24.2
k8s.io/apimachinery v0.24.2
Expand Down Expand Up @@ -78,7 +78,7 @@ require (
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
golang.org/x/sys v0.1.0 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
Expand Down
Loading