From 8efa5112e10f1d902da3df3b0201d41ed49132b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Oliver=20B=C3=A4hler?= Date: Sun, 25 Feb 2024 15:41:59 +0100 Subject: [PATCH] feat: add cname for projectcapsule.dev --- CNAME | 1 + Makefile | 2 + content/en/_index.md | 9 +- .../en/docs/guides/control-pod-security.md | 256 ++++++++++++++++++ content/en/docs/integrations/dashboard.md | 4 +- .../docs/integrations/managed-kubernetes.md | 160 +++++++++++ content/en/docs/integrations/rancher.md | 7 +- .../en/docs/{tutorial => tenants}/_index.md | 4 +- content/en/docs/tenants/configuration.md | 58 ++++ content/en/docs/tenants/namespaces.md | 122 +++++++++ .../docs/{tutorial => tenants}/permissions.md | 53 +++- content/en/docs/tutorial/configuration.md | 6 - 12 files changed, 657 insertions(+), 25 deletions(-) create mode 100644 CNAME create mode 100644 Makefile create mode 100644 content/en/docs/guides/control-pod-security.md create mode 100644 content/en/docs/integrations/managed-kubernetes.md rename content/en/docs/{tutorial => tenants}/_index.md (86%) create mode 100644 content/en/docs/tenants/configuration.md create mode 100644 content/en/docs/tenants/namespaces.md rename content/en/docs/{tutorial => tenants}/permissions.md (76%) delete mode 100644 content/en/docs/tutorial/configuration.md diff --git a/CNAME b/CNAME new file mode 100644 index 0000000..9a58312 --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +projectcapsule.dev \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..47b30d0 --- /dev/null +++ b/Makefile @@ -0,0 +1,2 @@ +apidoc: apidocs-gen + $(APIDOCS_GEN) crdoc --resources config/crd/bases --output docs/content/general/crds-apis.md --template docs/template/reference-cr.tmpl diff --git a/content/en/_index.md b/content/en/_index.md index c6df1ec..52936cc 100644 --- a/content/en/_index.md +++ b/content/en/_index.md @@ -2,11 +2,14 @@ title: Goldydocs --- -{{< blocks/cover title="Project Capsule" image_anchor="top" height="full" >}} +{{< blocks/cover title="Capsule" image_anchor="top" height="full" >}} +# A multi-tenancy and policy-based framework for Kubernetes { class="text-center" } +
  Learn More
-A multi-tenancy and policy-based framework for Kubernetes
+
+
{{< blocks/link-down color="info" >}}
{{< /blocks/cover >}}

@@ -31,7 +34,7 @@ Capsule is a framework which enables you to create you
-{{% blocks/section type="row" %}}
+{{% blocks/section color="dark" type="row" %}}

{{% blocks/feature icon="fa-building-shield" title="Governance" %}}
Leverage Kubernetes Admission Controllers to enforce industry security best practices and meet policy requirements.
diff --git a/content/en/docs/guides/control-pod-security.md b/content/en/docs/guides/control-pod-security.md
new file mode 100644
index 0000000..366389a
--- /dev/null
+++ b/content/en/docs/guides/control-pod-security.md
@@ -0,0 +1,256 @@
---
title: Pod Security
weight: 10
description: Control the security of the pods running in the tenant namespaces
---

In Kubernetes, by default, workloads run with administrative access. This might be acceptable if there is only a single application running in the cluster, or a single user accessing it, but it is seldom what you actually need: you'll consequently suffer a noisy neighbour effect along with a large security blast radius.

Many of these concerns were initially addressed by [PodSecurityPolicies](https://kubernetes.io/docs/concepts/security/pod-security-policy), which have been present in the Kubernetes APIs since the very early days.

Pod Security Policies were deprecated in Kubernetes 1.21 and removed entirely in 1.25. As a replacement, the [Pod Security Standards](https://kubernetes.io/docs/concepts/security/pod-security-standards/) and [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) have been introduced. Capsule supports the new standard for tenants under its control, as well as the older approach.


## Pod Security Standards
One of the issues with Pod Security Policies is that it is difficult to apply restrictive permissions on a granular level, which increases security risk. Also, Pod Security Policies are applied when a request is submitted: there is no way of applying them to pods that are already running. For these, and other, reasons the Kubernetes community decided to deprecate them.

With Pod Security Policies deprecated and removed, the [Pod Security Standards](https://kubernetes.io/docs/concepts/security/pod-security-standards/) are used in their place. They define three different policies to broadly cover the security spectrum. These policies are cumulative and range from highly-permissive to highly-restrictive:

- **Privileged**: unrestricted policy, providing the widest possible level of permissions.
- **Baseline**: minimally restrictive policy which prevents known privilege escalations.
- **Restricted**: heavily restricted policy, following current Pod hardening best practices.

Kubernetes provides a built-in [Admission Controller](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#podsecurity) to enforce the Pod Security Standards at either:

1. cluster level, which applies a standard configuration to all namespaces in a cluster
2. namespace level, one namespace at a time

For the first case, the cluster admin has to configure the Admission Controller and pass the configuration to the `kube-apiserver` by means of the `--admission-control-config-file` extra argument, for example:

```yaml
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
  configuration:
    apiVersion: pod-security.admission.config.k8s.io/v1beta1
    kind: PodSecurityConfiguration
    defaults:
      enforce: "baseline"
      enforce-version: "latest"
      warn: "restricted"
      warn-version: "latest"
      audit: "restricted"
      audit-version: "latest"
    exemptions:
      usernames: []
      runtimeClasses: []
      namespaces: [kube-system]
```

For the second case, the cluster admin can just assign labels to the specific namespace where the policy should be enforced, since the Pod Security Admission Controller is enabled by default starting from Kubernetes 1.23:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  labels:
    pod-security.kubernetes.io/enforce: baseline
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/audit: restricted
  name: development
```

### Capsule
According to the regular Kubernetes segregation model, the cluster admin has to operate either at cluster level or at namespace level. Since Capsule introduces a further segregation level (the _Tenant_ abstraction), the cluster admin can implement Pod Security Standards at tenant level by simply forcing specific labels on all the namespaces created in the tenant.

As cluster admin, create a tenant with additional labels:

```yaml
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: solar
spec:
  namespaceOptions:
    additionalMetadata:
      labels:
        pod-security.kubernetes.io/enforce: baseline
        pod-security.kubernetes.io/audit: restricted
        pod-security.kubernetes.io/warn: restricted
  owners:
  - kind: User
    name: alice
```

All namespaces created by the tenant owner will inherit the Pod Security labels:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  labels:
    capsule.clastix.io/tenant: solar
    kubernetes.io/metadata.name: solar-development
    pod-security.kubernetes.io/enforce: baseline
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/audit: restricted
  name: solar-development
  ownerReferences:
  - apiVersion: capsule.clastix.io/v1beta2
    blockOwnerDeletion: true
    controller: true
    kind: Tenant
    name: solar
```

and the regular Pod Security Admission Controller does the magic:

```bash
kubectl --kubeconfig alice-solar.kubeconfig apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: solar-production
spec:
  containers:
  - image: nginx
    name: nginx
    ports:
    - containerPort: 80
    securityContext:
      privileged: true
EOF
```

The request gets denied:

```
Error from server (Forbidden): error when creating "STDIN":
pods "nginx" is forbidden: violates PodSecurity "baseline:latest": privileged
(container "nginx" must not set securityContext.privileged=true)
```

If the tenant owner tries to change or delete the above labels, Capsule will reconcile them to the original tenant manifest set by the cluster admin.
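For example, a quick way to see the reconciliation in action (a sketch, assuming the `solar-development` namespace created above and Alice's kubeconfig):

```bash
# Attempt to remove the enforce label as the tenant owner
kubectl --kubeconfig alice-solar.kubeconfig label ns solar-development \
  pod-security.kubernetes.io/enforce-

# Read the label back: Capsule restores it from the tenant manifest
kubectl --kubeconfig alice-solar.kubeconfig get ns solar-development \
  -o jsonpath='{.metadata.labels.pod-security\.kubernetes\.io/enforce}'
# baseline
```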
As an additional security measure, the cluster admin can also prevent the tenant owner from making improper use of the above labels:

```
kubectl annotate tenant solar \
  capsule.clastix.io/forbidden-namespace-labels-regexp="pod-security.kubernetes.io\/(enforce|warn|audit)"
```

In that case, the tenant owner gets denied if she tries to use the labels:

```
kubectl --kubeconfig alice-solar.kubeconfig label ns solar-production \
  pod-security.kubernetes.io/enforce=restricted \
  --overwrite

Error from server (Label pod-security.kubernetes.io/audit is forbidden for namespaces in the current Tenant ...
```

## Pod Security Policies
As stated in the documentation, *"PodSecurityPolicies enable fine-grained authorization of pod creation and updates. A Pod Security Policy is a cluster-level resource that controls security sensitive aspects of the pod specification. The `PodSecurityPolicy` objects define a set of conditions that a pod must run with in order to be accepted into the system, as well as defaults for the related fields."*

Using [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy), the cluster admin can impose limits on pod creation, for example the types of volume that can be consumed, or the Linux user that the process runs as (in order to avoid running things as root), and more. From a multi-tenancy point of view, the cluster admin has to control how users run pods in their tenants, with a different level of permission on a per-tenant basis.

Assume the Kubernetes cluster has been configured with the [Pod Security Policy Admission Controller](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#podsecuritypolicy) enabled in the API server: `--enable-admission-plugins=PodSecurityPolicy`

The cluster admin creates a `PodSecurityPolicy`:

```yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp:restricted
spec:
  privileged: false
  # Required to prevent escalations to root.
  allowPrivilegeEscalation: false
```

Then create a _ClusterRole_ granting the use of it:

```yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: psp:restricted
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  resourceNames: ['psp:restricted']
  verbs: ['use']
```

The cluster admin can assign this role to all namespaces in a tenant by setting the tenant manifest:

```yaml
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: solar
spec:
  owners:
  - name: alice
    kind: User
  additionalRoleBindings:
  - clusterRoleName: psp:restricted
    subjects:
    - kind: "Group"
      apiGroup: "rbac.authorization.k8s.io"
      name: "system:authenticated"
```

With the given specification, Capsule will ensure that all tenant namespaces will contain a _RoleBinding_ for the specified _Cluster Role_:

```yaml
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: 'capsule-solar-psp:restricted'
  namespace: solar-production
  labels:
    capsule.clastix.io/tenant: solar
subjects:
  - kind: Group
    apiGroup: rbac.authorization.k8s.io
    name: 'system:authenticated'
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: 'psp:restricted'
```

With the `psp:restricted` Cluster Role bound this way, the tenant owner is forbidden to run privileged pods in the `solar-production` namespace and to perform privilege escalation, as declared by the policy.
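The cluster admin can double-check what the binding permits before handing the tenant over (a sketch using `kubectl auth can-i` with impersonation; the user and group names are the ones assumed in this guide):

```bash
# Can alice "use" the restricted PodSecurityPolicy in the tenant namespace?
kubectl auth can-i use podsecuritypolicies/psp:restricted \
  --as alice --as-group capsule.clastix.io --as-group system:authenticated \
  -n solar-production
# yes
```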
As the tenant owner, create a namespace:

```
kubectl --kubeconfig alice-solar.kubeconfig create ns solar-production
```

and try to create a pod with privileged permissions:

```bash
kubectl --kubeconfig alice-solar.kubeconfig apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: solar-production
spec:
  containers:
  - image: nginx
    name: nginx
    ports:
    - containerPort: 80
    securityContext:
      privileged: true
EOF
```

Since the assigned `PodSecurityPolicy` explicitly disallows privileged containers, the tenant owner will see her request rejected by the Pod Security Policy Admission Controller.
\ No newline at end of file
diff --git a/content/en/docs/integrations/dashboard.md b/content/en/docs/integrations/dashboard.md
index 76f8b1e..b00dfc0 100644
--- a/content/en/docs/integrations/dashboard.md
+++ b/content/en/docs/integrations/dashboard.md
@@ -1,9 +1,9 @@
 ---
-title: Kubernetes Dashboard
+title: Dashboard
 ---
 
 {{% pageinfo %}}
-This guide works with the kubernetes dashboard v2.0.0 ([Chart 6.0.8](https://artifacthub.io/packages/helm/k8s-dashboard/kubernetes-dashboard/6.0.8)) and later. It has not yet been tested successfully with with v3.x version of the dashboard.
+This guide works with the Kubernetes Dashboard v2.0.0 ([Chart 6.0.8](https://artifacthub.io/packages/helm/k8s-dashboard/kubernetes-dashboard/6.0.8)). It has not yet been tested successfully with the v3.x version of the dashboard.
 {{% /pageinfo %}}
 
 This guide describes how to integrate the [Kubernetes Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) and [Capsule Proxy](/docs/capsule-proxy/) with OIDC authorization.
diff --git a/content/en/docs/integrations/managed-kubernetes.md b/content/en/docs/integrations/managed-kubernetes.md
new file mode 100644
index 0000000..0cfd85d
--- /dev/null
+++ b/content/en/docs/integrations/managed-kubernetes.md
@@ -0,0 +1,160 @@
---
title: Managed Kubernetes
weight: 10
description: Capsule on managed Kubernetes offerings
---

Capsule Operator can be easily installed on a Managed Kubernetes Service. Since you do not have access to the Kubernetes API Server, you should check with the provider of the service that:

* the default `cluster-admin` ClusterRole is accessible
* the following Admission Controllers are enabled on the API Server:
  * `PodNodeSelector`
  * `LimitRanger`
  * `ResourceQuota`
  * `MutatingAdmissionWebhook`
  * `ValidatingAdmissionWebhook`


## AWS EKS

This is an example of how to install an AWS EKS cluster with one user managed by Capsule.
It is based on [Using IAM Groups to manage Kubernetes access](https://www.eksworkshop.com/beginner/091_iam-groups/intro/).

Create an EKS cluster:

```bash
export AWS_DEFAULT_REGION="eu-west-1"
export AWS_ACCESS_KEY_ID="xxxxx"
export AWS_SECRET_ACCESS_KEY="xxxxx"

eksctl create cluster \
--name=test-k8s \
--managed \
--node-type=t3.small \
--node-volume-size=20 \
--kubeconfig=kubeconfig.conf
```

Create the AWS user alice using CloudFormation, and create the AWS access files and a kubeconfig for that user:

```bash
# Note: 'EOF' is quoted so the shell does not expand the CloudFormation
# ${...} references inside the template.
cat > cf.yml << 'EOF'
Parameters:
  ClusterName:
    Type: String
Resources:
  UserAlice:
    Type: AWS::IAM::User
    Properties:
      UserName: !Sub "alice-${ClusterName}"
      Policies:
      - PolicyName: !Sub "alice-${ClusterName}-policy"
        PolicyDocument:
          Version: "2012-10-17"
          Statement:
          - Sid: AllowAssumeOrganizationAccountRole
            Effect: Allow
            Action: sts:AssumeRole
            Resource: !GetAtt RoleAlice.Arn
  AccessKeyAlice:
    Type: AWS::IAM::AccessKey
    Properties:
      UserName: !Ref UserAlice
  RoleAlice:
    Type: AWS::IAM::Role
    Properties:
      Description: !Sub "IAM role for the alice-${ClusterName} user"
      RoleName: !Sub "alice-${ClusterName}"
      AssumeRolePolicyDocument:
        Version: 2012-10-17
        Statement:
        - Effect: Allow
          Principal:
            AWS: !Sub "arn:aws:iam::${AWS::AccountId}:root"
          Action: sts:AssumeRole
Outputs:
  RoleAliceArn:
    Description: The ARN of the Alice IAM Role
    Value: !GetAtt RoleAlice.Arn
    Export:
      Name:
        Fn::Sub: "${AWS::StackName}-RoleAliceArn"
  AccessKeyAlice:
    Description: The AccessKey for Alice user
    Value: !Ref AccessKeyAlice
    Export:
      Name:
        Fn::Sub: "${AWS::StackName}-AccessKeyAlice"
  SecretAccessKeyAlice:
    Description: The SecretAccessKey for Alice user
    Value: !GetAtt AccessKeyAlice.SecretAccessKey
    Export:
      Name:
        Fn::Sub: "${AWS::StackName}-SecretAccessKeyAlice"
EOF

eval aws cloudformation deploy --capabilities CAPABILITY_NAMED_IAM \
  --parameter-overrides "ClusterName=test-k8s" \
  --stack-name "test-k8s-users" --template-file cf.yml

AWS_CLOUDFORMATION_DETAILS=$(aws cloudformation describe-stacks --stack-name "test-k8s-users")
ALICE_ROLE_ARN=$(echo "${AWS_CLOUDFORMATION_DETAILS}" | jq -r ".Stacks[0].Outputs[] | select(.OutputKey==\"RoleAliceArn\") .OutputValue")
ALICE_USER_ACCESSKEY=$(echo "${AWS_CLOUDFORMATION_DETAILS}" | jq -r ".Stacks[0].Outputs[] | select(.OutputKey==\"AccessKeyAlice\") .OutputValue")
ALICE_USER_SECRETACCESSKEY=$(echo "${AWS_CLOUDFORMATION_DETAILS}" | jq -r ".Stacks[0].Outputs[] | select(.OutputKey==\"SecretAccessKeyAlice\") .OutputValue")

eksctl create iamidentitymapping --cluster="test-k8s" --arn="${ALICE_ROLE_ARN}" --username alice --group capsule.clastix.io

cat > aws_config << EOF
[profile alice]
role_arn=${ALICE_ROLE_ARN}
source_profile=alice
EOF

cat > aws_credentials << EOF
[alice]
aws_access_key_id=${ALICE_USER_ACCESSKEY}
aws_secret_access_key=${ALICE_USER_SECRETACCESSKEY}
EOF

eksctl utils write-kubeconfig --cluster=test-k8s --kubeconfig="kubeconfig-alice.conf"
cat >> kubeconfig-alice.conf << EOF
      - name: AWS_PROFILE
        value: alice
      - name: AWS_CONFIG_FILE
        value: aws_config
      - name: AWS_SHARED_CREDENTIALS_FILE
        value: aws_credentials
EOF
```

Export the "admin" kubeconfig to be able to install Capsule:

```bash
export KUBECONFIG=kubeconfig.conf
```

[Install Capsule](/docs/getting-started#install) and create a tenant where alice has ownership.
Use the default Tenant example:

```bash
kubectl apply -f https://raw.githubusercontent.com/clastix/capsule/master/config/samples/capsule_v1beta1_tenant.yaml
```

Based on the tenant configuration above, the user alice should be able to create namespaces. Switch to a new terminal and try to create a namespace as the user alice:

```bash
# Unset AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY if defined
unset AWS_ACCESS_KEY_ID
unset AWS_SECRET_ACCESS_KEY
kubectl create namespace test --kubeconfig="kubeconfig-alice.conf"
```

## Azure AKS

This reference implementation introduces the recommended starting (baseline) infrastructure architecture for implementing a multi-tenant Azure AKS cluster using Capsule. [See CoAKS](https://github.com/clastix/coaks-baseline-architecture).


## Charmed Kubernetes

[Canonical Charmed Kubernetes](https://github.com/charmed-kubernetes) is a Kubernetes distribution coming with out-of-the-box tools that support deployments and operational management, and make microservice development easier. Combined with Capsule, Charmed Kubernetes allows users to further reduce the operational overhead of Kubernetes setup and management.

The Charm package for Capsule is available to Charmed Kubernetes users via [Charmhub.io](https://charmhub.io/capsule-k8s).
diff --git a/content/en/docs/integrations/rancher.md b/content/en/docs/integrations/rancher.md
index c4bc7f0..dcd379b 100644
--- a/content/en/docs/integrations/rancher.md
+++ b/content/en/docs/integrations/rancher.md
@@ -26,4 +26,9 @@ Capsule allows tenant isolation and resource control in a declarative way, whi
 
 You can read in detail how the integration works and how to configure it in the following guides.
 
 How to integrate Rancher Projects with Capsule Tenants
-How to enable cluster-wide resources and Rancher shell access.
\ No newline at end of file
+How to enable cluster-wide resources and Rancher shell access.
+
+## Tenants and Projects
+
+
+
diff --git a/content/en/docs/tutorial/_index.md b/content/en/docs/tenants/_index.md
similarity index 86%
rename from content/en/docs/tutorial/_index.md
rename to content/en/docs/tenants/_index.md
index f3f5702..ccde9e7 100644
--- a/content/en/docs/tutorial/_index.md
+++ b/content/en/docs/tenants/_index.md
@@ -1,10 +1,10 @@
 ---
-title: Tutorial
+title: Tenants
 weight: 4
 description: >
   Understand principles and concepts of Capsule Tenants
 ---
 
-Capsule is a framework to implement multi-tenant and policy-driven scenarios in Kubernetes. In this tutorial, we'll focus on a hypothetical case covering the main features of the Capsule Operator.
+Capsule is a framework to implement multi-tenant and policy-driven scenarios in Kubernetes. This documentation is styled in a tutorial format and is designed to be read in sequence: we'll focus on a hypothetical case covering the main features of the Capsule Operator, starting with the basics and then moving on to more advanced topics.
 
 **Acme Corp**, our sample organization, is building a Container as a Service platform (CaaS) to serve multiple lines of business, or departments, e.g. Oil, Gas, Solar, Wind, Water. Each department has its own team of engineers responsible for the development, deployment, and operation of their digital products.
 We'll work with the following actors:
diff --git a/content/en/docs/tenants/configuration.md b/content/en/docs/tenants/configuration.md
new file mode 100644
index 0000000..620468e
--- /dev/null
+++ b/content/en/docs/tenants/configuration.md
@@ -0,0 +1,58 @@
---
title: Configuration
weight: 20
description: >
  Understand the Capsule configuration options and how to use them.
---

The configuration for the Capsule controller is done via its dedicated configuration Custom Resource.


## CapsuleConfiguration

You can inspect the available configuration options, and how to use them, with `kubectl explain`:

```bash
kubectl explain capsuleconfiguration.spec
```

### enableTLSReconciler
Toggles the TLS reconciler: the controller that generates a CA and certificates for the webhooks when these are not already provided, or when they are managed externally with Vault or cert-manager.

### forceTenantPrefix
Enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash. This is useful to avoid Namespace name collisions in a public CaaS environment.

### nodeMetadata
Allows setting the forbidden metadata for the worker nodes that could be patched by a Tenant. This applies only if the Tenant has an active NodeSelector and the Owner has the right to patch their nodes.

### overrides
Allows setting a different name rather than the canonical one for Capsule configuration objects, such as the webhook secret or configurations.

### protectedNamespaceRegex
Disallows creation of namespaces whose name matches this regexp.

### userGroups
Names of the groups for Capsule users. A user must have at least one of these groups to be considered for Capsule tenancy. If a user does not have any group mentioned here, they are not recognized as a Capsule user.


## Controller Options

Depending on the version of the Capsule Controller, the configuration options may vary. You can view the options for the latest version of the Capsule Controller [here]() or by executing the controller locally:

```bash
$ docker run ghcr.io/projectcapsule/capsule:v0.6.0-rc0 -h
2024/02/25 13:21:21 maxprocs: Leaving GOMAXPROCS=4: CPU quota undefined
Usage of /ko-app/capsule:
      --configuration-name string         The CapsuleConfiguration resource name to use (default "default")
      --enable-leader-election            Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.
      --metrics-addr string               The address the metric endpoint binds to. (default ":8080")
      --version                           Print the Capsule version and exit
      --webhook-port int                  The port the webhook server binds to. (default 9443)
      --zap-devel                         Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error)
      --zap-encoder encoder               Zap log encoding (one of 'json' or 'console')
      --zap-log-level level               Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
      --zap-stacktrace-level level        Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').
      --zap-time-encoding time-encoding   Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.
```

diff --git a/content/en/docs/tenants/namespaces.md b/content/en/docs/tenants/namespaces.md
new file mode 100644
index 0000000..3965219
--- /dev/null
+++ b/content/en/docs/tenants/namespaces.md
@@ -0,0 +1,122 @@
---
title: Namespaces
weight: 2
description: >
  Assign namespaces to tenants
---

Namespaces are the unit of tenancy in Capsule: every namespace a tenant owner creates is assigned to one of their tenants, either by name prefix or by label, as described below.


## Create Namespaces

Alice, once logged in with her credentials, can create a new namespace in her tenant by simply issuing `kubectl create ns solar-production`.


## Multiple Tenants

A single team is likely responsible for multiple lines of business. For example, in our sample organization Acme Corp., Alice is responsible for both the Solar and Green lines of business. It's more likely that Alice requires two different tenants, for example solar and green, to keep things isolated.

By design, the Capsule operator does not permit a hierarchy of tenants, since all tenants are at the same level. However, we can assign the ownership of multiple tenants to the same user or group of users.

Bill, the cluster admin, creates multiple tenants having alice as the owner:

```yaml
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: solar
spec:
  owners:
  - name: alice
    kind: User
```

and

```yaml
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: green
spec:
  owners:
  - name: alice
    kind: User
```

Alternatively, the ownership can be assigned to a group called solar-and-green for both tenants:

```yaml
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: solar
spec:
  owners:
  - name: solar-and-green
    kind: Group
```

> See [Ownership](/docs/tenants/permissions#ownership) for more details on how to assign ownership to a group of users.

The two tenants remain isolated from each other in terms of resource assignments, e.g. `ResourceQuotas`, `Nodes`, `StorageClasses` and `IngressClasses`, and in terms of governance, e.g. `NetworkPolicies`, `PodSecurityPolicies`, `Trusted Registries`, etc.

When Alice logs in, she has access to all namespaces belonging to both the solar and green tenants.


### Tenant Prefix

> We recommend using the [forceTenantPrefix](/docs/tenants/configuration/#forcetenantprefix) option for production environments.

If the [forceTenantPrefix](/docs/tenants/configuration/#forcetenantprefix) option is enabled, which is **not** the case by default, namespaces are automatically assigned to the right tenant by Capsule because the operator does a lookup on the tenant names.
For example, Alice creates two namespaces called `solar-production` and `green-production`:

```bash
kubectl create ns solar-production
kubectl create ns green-production
```

And they are assigned to the tenants based on their prefix:

```bash
$ kubectl get tnt
NAME    STATE    NAMESPACE QUOTA   NAMESPACE COUNT   NODE SELECTOR   AGE
green   Active                     1                                 3m26s
solar   Active                     1                                 3m26s
```

However, alice cannot create a namespace which does not have the prefix of one of the tenants she owns, for example `production`:

```bash
$ kubectl create ns production
Error from server (Forbidden): admission webhook "owner.namespace.capsule.clastix.io" denied the request: The Namespace prefix used doesn't match any available Tenant
```

### Label

By default, if the [forceTenantPrefix](/docs/tenants/configuration/#forcetenantprefix) option is not enabled, Alice needs to specify the tenant name as the value of the `capsule.clastix.io/tenant` label in the namespace manifest:

```yaml
kind: Namespace
apiVersion: v1
metadata:
  name: solar-production
  labels:
    capsule.clastix.io/tenant: solar
```

If not specified, Capsule is unable to assign the namespace to a tenant and will deny the request:

```bash
$ kubectl create ns solar-production
Error from server (Forbidden): admission webhook "owner.namespace.capsule.clastix.io" denied the request: Please use capsule.clastix.io/tenant label when creating a namespace
```
diff --git a/content/en/docs/tutorial/permissions.md b/content/en/docs/tenants/permissions.md
similarity index 76%
rename from content/en/docs/tutorial/permissions.md
rename to content/en/docs/tenants/permissions.md
index 2063c2f..28199c1 100644
--- a/content/en/docs/tutorial/permissions.md
+++ b/content/en/docs/tenants/permissions.md
@@ -7,7 +7,7 @@ description: >
 
 ## Ownership
 
-Capsule introduces the principal, that tenants must have owners. The owner of a tenant is a user or a group of users that have the right to create, delete, and manage the tenant's namespaces and other tenant resources. However an owner does not have the permissions to manage the tenants they are owner of. This is still done by cluster-administrators.
+Capsule introduces the principle that tenants must have owners. The owner of a tenant is a user or a group of users who have the right to create, delete, and manage the [tenant's namespaces](/docs/tenants/namespaces) and other tenant resources. However, an owner does not have the permissions to manage the tenants they own. This is still done by cluster administrators.
 
 ### Group Scope
 
 Capsule selects users who are eligible for tenancy by their group.
 
@@ -16,7 +16,11 @@
 
-### Assigning Ownership to Users
+### Assignment
+
+Learn how to assign ownership to users, groups and service accounts.
+
+#### Assigning Ownership to Users
 
 **Bill**, the cluster admin, receives a new request from Acme Corp's CTO asking for a new tenant to be onboarded, and the user alice will be the tenant owner. Bill then looks up Alice's identity, alice, in the Acme Corp. identity management system. Since Alice is a tenant owner, Bill needs to assign alice the Capsule group defined by the --capsule-user-group option, which defaults to capsule.clastix.io.
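Bill can verify which groups Capsule considers for tenancy (a sketch, assuming the CapsuleConfiguration object keeps its default name `default`):

```bash
kubectl get capsuleconfiguration default -o jsonpath='{.spec.userGroups}'
# ["capsule.clastix.io"]
```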
To keep things simple, we assume that Bill just creates a client certificate for

**Bill** creates a new tenant solar in the CaaS management portal according to the tenant's profile:

```yaml
-kubectl create -f - << EOF
 apiVersion: capsule.clastix.io/v1beta2
 kind: Tenant
 metadata:
@@ -34,7 +37,6 @@ spec:
   owners:
   - name: alice
     kind: User
-EOF
```

**Bill** checks if the new tenant is created and operational:

@@ -87,8 +89,7 @@ no

In the example above, Bill assigned the ownership of the solar tenant to the user alice. If another user, e.g. Bob, needs to administer the solar tenant, Bill can assign the ownership of the solar tenant to that user too:

-```bash
-kubectl apply -f - << EOF
+```yaml
 apiVersion: capsule.clastix.io/v1beta2
 kind: Tenant
 metadata:
@@ -99,13 +100,11 @@ spec:
     kind: User
   - name: bob
     kind: User
-EOF
```

However, it's more likely that Bill assigns the ownership of the solar tenant to a group of users instead of a single one, especially if you use [OIDC Authentication](/docs/guides/authentication#oidc). Bill creates a new group account solar-users in the Acme Corp. identity management system and then assigns Alice's and Bob's identities to the solar-users group.

-```bash
-kubectl apply -f - << EOF
+```yaml
 apiVersion: capsule.clastix.io/v1beta2
 kind: Tenant
 metadata:
@@ -114,7 +113,6 @@ spec:
   owners:
   - name: solar-users
     kind: Group
-EOF
```

With the configuration above, any user belonging to the `solar-users` group will be the owner of the solar tenant with the same permissions as Alice. For example, Bob can log in with his credentials and issue

@@ -132,8 +130,41 @@ You can use the Group subject to grant serviceaccounts the ownership of a tenant

```

```

+### Owner Roles
+
+By default, all Tenant Owners will be granted two ClusterRole resources using the RoleBinding API:
+
+1. `admin`: the Kubernetes default one, admin, that grants access to most of the namespace-scoped resources
+2. `capsule-namespace-deleter`: a custom ClusterRole, created by Capsule, which allows deleting the created namespaces
+
+You can observe this behavior when you get the tenant solar:
+
+```yaml
+
+```
+
+For example, assuming the tenant owner creates a namespace solar-production in the Tenant solar, the Role Bindings in that namespace give the tenant owner full permissions on the tenant namespaces.
+
+#### Role Aggregation
+
+Sometimes the `admin` role is missing certain permissions. You can [aggregate](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) the `admin` role with a custom role, for example, `prometheus-viewer`:
+
+```yaml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: prometheus-viewer
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-admin: "true"
+rules:
+- apiGroups: ["monitoring.coreos.com"]
+  resources: ["servicemonitors"]
+  verbs: ["get", "watch"]
+```
 
-## Rolebindings
+## Additional Rolebindings
 
 With tenant rolebindings you can distribute namespaced RoleBindings to all namespaces which are assigned to a tenant. Capsule then ensures the defined rolebindings are present and reconciled in all namespaces of the tenant. This is useful if users should have more insights on a per-tenant basis. Let's look at an example.
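The sketch below assumes the `solar` tenant and reuses the `prometheus-viewer` ClusterRole from the aggregation example above; the exact field values are illustrative. It relies on the same `additionalRoleBindings` API shown earlier for the Pod Security Policy binding:

```yaml
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: solar
spec:
  owners:
  - name: alice
    kind: User
  # Reconciled into a RoleBinding inside every namespace of the tenant
  additionalRoleBindings:
  - clusterRoleName: 'prometheus-viewer'
    subjects:
    - kind: Group
      apiGroup: rbac.authorization.k8s.io
      name: system:authenticated
```

With this in place, every namespace in the solar tenant gets a RoleBinding that grants all authenticated users read access to `servicemonitors`.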
diff --git a/content/en/docs/tutorial/configuration.md b/content/en/docs/tutorial/configuration.md deleted file mode 100644 index 7eade74..0000000 --- a/content/en/docs/tutorial/configuration.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Configuration -weight: 20 -description: > - Understand the Capsule configuration options and how to use them. ----