diff --git a/.github/actions/build-website/action.yml b/.github/actions/build-website/action.yml
index d1b6cef07..144014960 100644
--- a/.github/actions/build-website/action.yml
+++ b/.github/actions/build-website/action.yml
@@ -30,11 +30,6 @@ runs:
         role-to-assume: ${{ inputs.iam_role_arn }}
         role-session-name: ${{ inputs.iam_role_session_name }}
 
-    - name: Checkout Repository
-      uses: actions/checkout@v4
-      with:
-        fetch-depth: 0
-
     - name: Setup Node
       uses: actions/setup-node@v4
       with:
diff --git a/.github/workflows/website-deploy-preview.yml b/.github/workflows/website-deploy-preview.yml
index a2c5e5adc..3dc8d7986 100644
--- a/.github/workflows/website-deploy-preview.yml
+++ b/.github/workflows/website-deploy-preview.yml
@@ -29,6 +29,10 @@ permissions:
   id-token: write
   contents: read
 
+concurrency:
+  group: "docs-preview-${{ github.event.pull_request.number }}"
+  cancel-in-progress: true
+
 jobs:
   deploy-preview:
     runs-on: ubuntu-latest
@@ -42,6 +46,8 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          # This workflow runs on pull_request_target, so we need to check out the PR branch
+          ref: ${{ github.event.pull_request.head.ref }}
 
       - name: Build Website
         uses: ./.github/actions/build-website
diff --git a/docs/intro/intro.mdx b/docs/intro/intro.mdx
index 723107fac..85940f550 100644
--- a/docs/intro/intro.mdx
+++ b/docs/intro/intro.mdx
@@ -107,7 +107,7 @@ With SweetOps you can implement the following complex architectural patterns with
 
 ## What are the alternatives?
 
-The reference archietcture is comparable to various other solutions that bundle ready-to-go Terraform "templates" and offer subscription plans for access to their modules.
+The reference architecture is comparable to various other solutions that bundle ready-to-go Terraform "templates" and offer subscription plans for access to their modules.
 
 How does it differentiate from these solutions?
diff --git a/docs/jumpstart/action-items.mdx b/docs/jumpstart/action-items.mdx
index 9d1678dc5..ec177ea22 100644
--- a/docs/jumpstart/action-items.mdx
+++ b/docs/jumpstart/action-items.mdx
@@ -63,7 +63,7 @@ Before we can get started, here's the minimum information we need from you.
 
     Please also provision a single test user in your IdP for Cloud Posse to use for testing and add those user credentials to 1Password.
 
-    - [AWS Identity Center (SSO) ClickOps](/layers/identity/aws-sso/)
+    - [Setup AWS Identity Center (SSO)](/layers/identity/aws-sso/)
 
    - GSuite does not automatically sync Users and Groups with AWS Identity Center without additional configuration! If using GSuite as an IdP, considering deploying the [ssosync tool](https://github.com/awslabs/ssosync).
 
@@ -76,10 +76,9 @@ Before we can get started, here's the minimum information we need from you.
 
    If deploying AWS SAML as an alternative to AWS SSO, we will need a separate configuration and metadata file. Again, please refer to the relevant linked guide.
 
-    - [GSuite](https://aws.amazon.com/blogs/desktop-and-application-streaming/setting-up-g-suite-saml-2-0-federation-with-amazon-appstream-2-0/): Follow Steps 1 through 7. This document refers to Appstream, but the process will be the same for AWS.
- - [Office 365](/layers/identity/tutorials/how-to-setup-saml-login-to-aws-from-office-365) - - [JumpCloud](https://support.jumpcloud.com/support/s/article/getting-started-applications-saml-sso2) - - [Okta](https://help.okta.com/en-us/Content/Topics/DeploymentGuides/AWS/aws-configure-identity-provider.htm) + Please see the following guide and follow the steps to export metadata for your Identity Provider integration. All steps in AWS will be handled by Cloud Posse. + + - [Setup AWS SAML](/layers/identity/aws-saml/) diff --git a/docs/layers/accounts/design-decisions/decide-on-aws-organization-strategy.mdx b/docs/layers/accounts/design-decisions/decide-on-aws-organization-strategy.mdx index b0284021c..75d74bd0c 100644 --- a/docs/layers/accounts/design-decisions/decide-on-aws-organization-strategy.mdx +++ b/docs/layers/accounts/design-decisions/decide-on-aws-organization-strategy.mdx @@ -22,7 +22,7 @@ Cloud Posse recommends starting with a **Net-New Organization** - Only one AWS Control Tower can exist in an organization. -- AWS Control Tower only recenlty became managable with Terraform, and full support is not availble. +- AWS Control Tower only recently became manageable with Terraform, and full support is not available. Depending on the Scope of Work, Cloud Posse is usually responsible for provisioning accounts with terraform which requires all the same access as Control Tower. - Member accounts can only be provisioned from the top-level root “organization” account diff --git a/docs/layers/accounts/tutorials/manual-configuration.mdx b/docs/layers/accounts/tutorials/manual-configuration.mdx index 4370c70f8..55a0980e7 100644 --- a/docs/layers/accounts/tutorials/manual-configuration.mdx +++ b/docs/layers/accounts/tutorials/manual-configuration.mdx @@ -675,7 +675,7 @@ stacks/orgs/(namespace)/(tenant)/identity/global-region.yaml and add the arn: ``` import: - - orgs/e98s/gov/iam/_defaults + - orgs/acme/gov/iam/_defaults - mixins/region/global-region #... @@ -694,7 +694,7 @@ If the auto account id is not known, create an empty list instead: ``` import: - - orgs/e98s/gov/iam/_defaults + - orgs/acme/gov/iam/_defaults - mixins/region/global-region #... diff --git a/docs/layers/ecs/tutorials/1password-scim-bridge.mdx b/docs/layers/ecs/tutorials/1password-scim-bridge.mdx new file mode 100644 index 000000000..eeaaffd88 --- /dev/null +++ b/docs/layers/ecs/tutorials/1password-scim-bridge.mdx @@ -0,0 +1,135 @@ +--- +title: "Deploy 1Password SCIM Bridge" +sidebar_label: "1Password SCIM Bridge" +description: "Deploy the 1Password SCIM Bridge for ECS environments" +--- + +import Intro from "@site/src/components/Intro"; +import Steps from "@site/src/components/Steps"; +import Step from "@site/src/components/Step"; +import StepNumber from "@site/src/components/StepNumber"; +import CollapsibleText from "@site/src/components/CollapsibleText"; + + + The 1Password SCIM Bridge is a service that allows you to automate the management of users and groups in 1Password. This guide will walk you through deploying the SCIM Bridge for ECS environments. + + +## Implementation + +The implementation of this is fairly simple. We will generate credentials for the SCIM bridge in 1Password, store them in AWS SSM Parameter Store, deploy the SCIM bridge ECS service, and then finally connect your chosen identity provider. + + + + ### Generate Credentials for your SCIM bridge in 1Password + + The first step is to generate credentials for your SCIM bridge in 1Password. 
We will pass these credentials to Terraform and the ECS task definition to create the SCIM bridge. + + + 1. Log in to your 1Password account + 1. Click Integrations in the sidebar + 1. Select "Set up user provisioning" + 1. Choose "Custom" + 1. You should now see the SCIM bridge credentials. We will need the "scimsession" and "Bearer Token" for the next steps. + 1. Save these credentials in a secure location (such as 1Password) for future reference + 1. Store only the "scimsession" in AWS SSM Parameter Store. This will allow the ECS task definition to access the credentials securely. Then once the service is running, the server will ask for the bearer token to verify the connection, which we will enter at that time. + + + - Open the AWS Web Console - Navigate to the target account, such as `core-auto`, and target region, such as `us-west-2` + - Open "AWS System Manager" > "Parameter Store" + - Create a new Secure String parameter using the credentials you generated in the previous step: `/1password/scim/scimsession` + + + + There will be additional steps to complete the integration in 1Password, but first we need to deploy the SCIM bridge service. + + + + ### Deploy the SCIM bridge ECS Service + + The next step is to deploy the SCIM bridge ECS service. We will use Terraform to create the necessary resources with our existing `ecs-service` component. Ensure you have the `ecs-service` component and `ecs` cluster before proceeding. + + If you do not have ECS prerequisites, please see the [ECS layer](/layers/ecs) to create the necessary resources. + + + 1. Create a new stack configuration for the SCIM bridge. The placement of this file will depend on your project structure. For example, you could create a new file such as `stacks/catalog/ecs-services/1password-scim-bridge.yaml` with the following content: + + + ```yaml + import: + - catalog/terraform/services/defaults + + components: + terraform: + 1pass-scim: + metadata: + component: ecs-service + inherits: + - ecs-service/defaults + vars: + enabled: true + name: 1pass-scim + containers: + service: + name: op_scim_bridge + image: 1password/scim:v2.9.5 + cpu: 128 + memory: 512 + essential: true + dependsOn: + - containerName: redis + condition: START + port_mappings: + - containerPort: 3002 + hostPort: 3002 + protocol: tcp + map_environment: + OP_REDIS_URL: redis://localhost:6379 + OP_TLS_DOMAIN: "" + OP_CONFIRMATION_INTERVAL: "300" + map_secrets: + OP_SESSION: "1password/scim/scimsession" + log_configuration: + logDriver: awslogs + options: {} + redis: + name: redis + image: redis:latest + cpu: 128 + memory: 512 + essential: true + restart: always + port_mappings: + - containerPort: 6379 + hostPort: 6379 + protocol: tcp + map_environment: + REDIS_ARGS: "--maxmemory 256mb --maxmemory-policy volatile-lru" + log_configuration: + logDriver: awslogs + options: {} + ``` + + 2. Confirm the `map_secrets` value for `OP_SESSION` matches the AWS SSM Parameter Store path you created previously, an confirm they are in the same account and region as this ECS service component. + 3. Deploy the ECS service with Atmos: + ```bash + atmos terraform apply 1pass-scim -s core-usw2-auto + ``` + + + + + ### Validate the Integration + + After deploying the SCIM bridge ECS service, verify the service is running and accessible. Connect to the VPN (if deployed the ECS service is deployed with a private ALB), navigate to the SCIM bridge URL, and confirm the service is running. 
+ + For example, go to `https://1pass-scim.platform.usw1.auto.core.acme-svc.com/` + + + + ### Connect your Identity Provider + + Finally, connect your identity provider to the SCIM bridge. The SCIM bridge URL will be the URL you validated in the previous step. Follow the instructions in the 1Password SCIM Bridge documentation to connect your identity provider, using the Bearer Token you generated in the first step. + + + + diff --git a/docs/layers/eks/design-decisions/decide-on-secrets-management-for-eks.md b/docs/layers/eks/design-decisions/decide-on-secrets-management-for-eks.md new file mode 100644 index 000000000..8e9320ab8 --- /dev/null +++ b/docs/layers/eks/design-decisions/decide-on-secrets-management-for-eks.md @@ -0,0 +1,62 @@ +--- +title: "Decide on Secrets Management for EKS" +sidebar_label: "Secrets Management for EKS" +description: Decide on the secrets management strategy for EKS. +--- +import Intro from '@site/src/components/Intro'; +import KeyPoints from '@site/src/components/KeyPoints'; + + +We need to decide on a secrets management strategy for EKS. We prefer storing secrets externally, like in AWS SSM Parameter Store, to keep clusters more disposable. If we decide on this, we'll need a way to pull these secrets into Kubernetes. + + +## Problem + +We aim to design our Kubernetes clusters to be disposable and ephemeral, treating them like cattle rather than pets. This influences how we manage secrets. Ideally, Kubernetes should not be the sole source of truth for secrets, though we still want to leverage Kubernetes’ native `Secret` resource. If the cluster experiences a failure, storing secrets exclusively within Kubernetes risks losing access to them. Additionally, keeping secrets only in Kubernetes limits integration with other services. + +To address this, several solutions allow secrets to be stored externally (as the source of truth) while still utilizing Kubernetes' `Secret` resources. These solutions, including some open-source tools and recent offerings from Amazon, enhance resilience and interoperability. Any approach must respect IAM permissions and ensure secure secret management for applications running on EKS. We have several options to consider that balance external secret storage with Kubernetes-native functionality. + +### Option 1: External Secrets Operator + +Use [External Secrets Operator](https://external-secrets.io/latest/) with AWS SSM Parameter Store. + +External Secrets Operator is a Kubernetes operator that manages and stores sensitive information in external secret management systems like AWS Secrets Manager, GCP Secret Manager, Azure Key Vault, HashiCorp Vault, and more. It allows you to use these external secret management systems to securely add secrets in your Kubernetes cluster. + +Cloud Posse historically recommends using External Secrets Operator with AWS SSM Parameter Store and has existing Terraform modules to support this solution. See the [eks/external-secrets-operator](/components/library/aws/eks/external-secrets-operator/) component. + +### Option 2: AWS Secrets Manager secrets with Kubernetes Secrets Store CSI Driver + +Use [AWS Secrets and Configuration Provider (ASCP) for the Kubernetes Secrets Store CSI Driver](https://docs.aws.amazon.com/secretsmanager/latest/userguide/integrating_csi_driver.html). This option allows you to use AWS Secrets Manager secrets as Kubernetes secrets that can be accessed by Pods as environment variables or files mounted in the pods. 
The ASCP also works with [Parameter Store parameters](https://docs.aws.amazon.com/systems-manager/latest/userguide/integrating_csi_driver.html) + +However, Cloud Posse does not have existing Terraform modules for this solution. We would need to build this support. + +### Option 3: SOPS Operator + +Use [SOPS Operator](https://github.com/isindir/sops-secrets-operator) to manage secrets in Kubernetes. SOPS Operator is a Kubernetes operator that builds on the `sops` project by Mozilla to encrypt the sensitive portions of a `Secrets` manifest into a `SopsSecret` resource, and then decrypt and provision `Secrets` in the Kubernetes cluster. + +1. **Mozilla SOPS Encryption**: Mozilla SOPS (Secrets OPerationS) is a tool that encrypts Kubernetes secret manifests, allowing them to be stored securely in Git repositories. SOPS supports encryption using a variety of key management services. Most importantly, it supports AWS KMS which enables IAM capabilities for native integration with AWS. + +2. **GitOps-Compatible Secret Management**: In a GitOps setup, storing plain-text secrets in Git poses security risks. Using SOPS, we can encrypt sensitive data in Kubernetes secret manifests while keeping the rest of the manifest in clear text. This allows us to store encrypted secrets in Git, track changes with diffs, and maintain security while benefiting from GitOps practices like version control, auditability, and CI/CD pipelines. + +3. **AWS KMS Integration**: SOPS uses AWS KMS to encrypt secrets with customer-managed keys (CMKs), ensuring only authorized users—based on IAM policies—can decrypt them. The encrypted secret manifests can be safely committed to Git, with AWS securely managing the keys. Since it's IAM-based, it integrates seamlessly with STS tokens, allowing secrets to be decrypted inside the cluster without hardcoded credentials. + +4. **Kubernetes Operator**: The [SOPS Secrets Operator](https://github.com/isindir/sops-secrets-operator) automates the decryption and management of Kubernetes secrets. It monitors a `SopsSecret` resource containing encrypted secrets. When a change is detected, the operator decrypts the secrets using AWS KMS and generates a native Kubernetes `Secret`, making them available to applications in the cluster. AWS KMS uses envelope encryption to manage the encryption keys, ensuring that secrets remain securely encrypted at rest. + +5. **Improved Disaster Recovery and Security**: By storing the source of truth for secrets outside of Kubernetes (e.g., in Git), this setup enhances disaster recovery, ensuring secrets remain accessible even if the cluster is compromised or destroyed. While secrets are duplicated across multiple locations, security is maintained by using IAM for encryption and decryption outside Kubernetes, and Kubernetes' native Role-Based Access Control (RBAC) model for managing access within the cluster. This ensures that only authorized entities, both external and internal to Kubernetes, can access the secrets. + +The SOPS Operator combines the strengths of Mozilla SOPS and AWS KMS, allowing you to: +- Encrypt secrets using KMS keys. +- Store encrypted secrets in Git repositories. +- Automatically decrypt and manage secrets in Kubernetes using the SOPS Operator. + +This solution is ideal for teams following GitOps principles, offering secure, external management of sensitive information while utilizing Kubernetes' secret management capabilities. 
However, the redeployment required for secret rotation can be heavy-handed, potentially leading to a period where services are still using outdated or invalid secrets. This could cause services to fail until the new secrets are fully rolled out. + +## Recommendation + +We recommend using the External Secrets Operator with AWS SSM Parameter Store. This is a well-tested solution that we have used in the past. We have existing Terraform modules to support this solution. + +However, we are in the process of evaluating the AWS Secrets Manager secrets with Kubernetes Secrets Store CSI Driver solution. This is the AWS supported option and may be a better long-term solution. We will build the required Terraform component to support this solution. + +## Consequences + +We will develop the `eks/secrets-store-csi-driver` component using the [Secrets Store CSI Driver](https://secrets-store-csi-driver.sigs.k8s.io/getting-started/installation) diff --git a/docs/layers/eks/foundational-platform.mdx b/docs/layers/eks/foundational-platform.mdx index 0b51d57cf..566c1b8f2 100644 --- a/docs/layers/eks/foundational-platform.mdx +++ b/docs/layers/eks/foundational-platform.mdx @@ -16,7 +16,7 @@ We first deploy the foundation for the cluster. The `eks/cluster` component depl including Auth Config mapping. We do not deploy any nodes with the cluster initially. Then once EKS is available, we connect to the cluster and start deploying resources. First is Karpenter. We deploy the Karpenter chart on a Fargate node and the IAM service role to allow Karpenter to purchase Spot Instances. Karpenter is the only resources that will -be deployed to Fargate. Then we deploy Karpenter Provisioners using the CRD created by the initial Karpenter component. +be deployed to Fargate. Then we deploy Karpenter Node Pools using the CRD created by the initial Karpenter component. These provisioners will automatically launch and scale the cluster to meet our demands. Next we deploy `idp-roles` to manage custom roles for the cluster, and deploy `metrics-server` to provide access to resource metrics. @@ -49,7 +49,7 @@ those implementations in follow up topics. For details, see the EKS Cluster, including IAM role to Kubernetes Auth Config mapping. - [`eks/karpenter`](/components/library/aws/eks/karpenter/): Installs the Karpenter chart on the EKS cluster and prepares the environment for provisioners. - - [`eks/karpenter-provisioner`](/components/library/aws/eks/karpenter-node-pool/): Deploys Karpenter Provisioners + - [`eks/karpenter-provisioner`](/components/library/aws/eks/karpenter-node-pool/): Deploys Karpenter Node Pools using CRDs made available by `eks/karpenter` - [`iam-service-linked-roles`](/components/library/aws/iam-service-linked-roles/): Provisions [IAM Service-Linked](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html) roles. 
These diff --git a/docs/layers/github-actions/design-decisions/decide-on-self-hosted-runner-architecture.mdx b/docs/layers/github-actions/design-decisions/decide-on-self-hosted-runner-architecture.mdx new file mode 100644 index 000000000..0151b12cb --- /dev/null +++ b/docs/layers/github-actions/design-decisions/decide-on-self-hosted-runner-architecture.mdx @@ -0,0 +1,104 @@ +--- +title: "Decide on Self-Hosted Runner Architecture" +sidebar_label: Runner Architecture +description: Decide how to create self-hosted runners +--- + +import Intro from "@site/src/components/Intro"; +import Note from '@site/src/components/Note'; + + +Decide on how to operate self-hosted runners that are used to run GitHub Actions workflows. These runners can be set up in various ways and allow us to avoid platform fees while running CI jobs in private infrastructure, enabling access to VPC resources. This approach is ideal for private repositories, providing control over instance size, architecture, and control costs by leveraging spot instances. The right choice depends on your platform, whether you’re using predominantly EKS, ECS, or Lambda. + + +## Problem + +When using GitHub Actions, you can opt for both GitHub Cloud-hosted and self-hosted runners, and they can complement each other. In some cases, self-hosted runners are essential—particularly for accessing resources within a VPC, such as databases, Kubernetes API endpoints, or Kafka servers, which is common in GitOps workflows. + +However, while self-hosted runners are ideal for private infrastructure, they pose risks in public or open-source repositories due to potential exposure of sensitive resources. If your organization maintains open-source projects, this should be a critical consideration, and we recommend using cloud-hosted runners for those tasks. + +The hosting approach for self-hosted runners should align with your infrastructure. If you use Kubernetes, it's generally best to run your runners on Kubernetes. Conversely, if your infrastructure relies on ECS or Lambdas, you may want to avoid unnecessary Kubernetes dependencies and opt for alternative hosting methods. + +In Kubernetes-based setups, configuring node pools with Karpenter is key to maintaining stability and ensuring effective auto-scaling with a mix of spot and on-demand instances. However, tuning this setup can be challenging, especially with recent changes to ARC, where the [newer version does not support multiple labels for runner groups](https://github.com/actions/actions-runner-controller/issues/2445), leading to community disagreement over trade-offs. We provide multiple deployment options for self-hosted runners, including EKS, Philips Labs' solution, and Auto Scaling Groups (ASG), tailored to your specific runner management needs. + +## Considered Options + +### Option 1: EC2 Instances in an Auto Scaling Group (`github-runners`) + +The first option is to deploy EC2 instances in an Auto Scaling Group. This is the simplest option. We can use the +`github-runners` component to deploy the runners. However, this option is not as scalable as the other options. + +### Option 2: Actions Runner Controller on EKS (`eks/actions-runner-controller`) + +The second option is to deploy the Actions Runner Controller on EKS. Since many implementations already have EKS, this +option is a good choice to reuse existing infrastructure. 
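+
+As a rough sketch, deploying this option with Atmos might look like the following stack snippet for the `eks/actions-runner-controller` component described below (the runner name, scope, and replica counts are illustrative assumptions, not a definitive configuration):
+
+```yaml
+components:
+  terraform:
+    eks/actions-runner-controller:
+      vars:
+        enabled: true
+        # Hypothetical runner pool; adjust the scope and replica counts to your needs
+        runners:
+          infra-runner:
+            type: repository
+            scope: acme/infra
+            min_replicas: 1
+            max_replicas: 8
+```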
+ +We can use the `eks/actions-runner-controller` component to deploy the runners, which is built with the +[Actions Runner Controller helm chart](https://github.com/actions/actions-runner-controller). + +### Option 3: GitHub Actions Runner on EKS (`eks/github-actions-runner`) + +Alternatively, we can deploy the GitHub Actions Runner on EKS. This option is similar to the previous one, but it uses +the GitHub Actions Runner instead of the Actions Runner Controller. + +This component deploys self-hosted GitHub Actions Runners and a +[Controller](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller#introduction) +on an EKS cluster, using +"[runner scale sets](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#runner-scale-set)". + +This solution is supported by GitHub and supersedes the +[actions-runner-controller](https://github.com/actions/actions-runner-controller/blob/master/docs/about-arc.md) +developed by Summerwind and deployed by Cloud Posse's +[actions-runner-controller](https://docs.cloudposse.com/components/library/aws/eks/actions-runner-controller/) +component. + +However, there are some limitations to the official Runner Sets implementation: + +- #### Limited set of packages + + The runner image used by Runner Sets contains [no more packages than are necessary](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller#about-the-runner-container-image) to run the runner. This is in contrast to the Summerwind implementation, which contains some commonly needed packages like `build-essential`, `curl`, `wget`, `git`, and `jq`, and the GitHub hosted images which contain a robust set of tools. (This is a limitation of the official Runner Sets implementation, not this component per se.) You will need to install any tools you need in your workflows, either as part of your workflow (recommended), by maintaining a [custom runner image](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller#creating-your-own-runner-image), or by running such steps in a [separate container](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) that has the tools pre-installed. Many tools have publicly available actions to install them, such as `actions/setup-node` to install NodeJS or `dcarbone/install-jq-action` to install `jq`. You can also install packages using `awalsh128/cache-apt-pkgs-action`, which has the advantage of being able to skip the installation if the package is already installed, so you can more efficiently run the same workflow on GitHub hosted as well as self-hosted runners. + + There are (as of this writing) open feature requests to add some commonly needed packages to the official Runner Sets runner image. You can upvote these requests [here](https://github.com/actions/actions-runner-controller/discussions/3168) and [here](https://github.com/orgs/community/discussions/80868) to help get them implemented. + +- #### Docker in Docker (dind) mode only + + In the current version of this component, only "dind" (Docker in Docker) mode has been tested. Support for "kubernetes" mode is provided, but has not been validated. 
+
+- #### Limited configuration options
+
+  Many elements in the Controller chart are not directly configurable by named inputs. To configure them, you can use the `controller.chart_values` input or create a `resources/values-controller.yaml` file in the component to supply values.
+
+  Almost all the features of the Runner Scale Set chart are configurable by named inputs. The exceptions are:
+
+  - There is no specific input for specifying an outbound HTTP proxy.
+  - There is no specific input for supplying a [custom certificate authority (CA) certificate](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#custom-tls-certificates) to use when connecting to GitHub Enterprise Server.
+
+  You can specify these values by creating a `resources/values-runner.yaml` file in the component and setting values as shown by the default Helm [values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/gha-runner-scale-set/values.yaml), and they will be applied to all runners.
+
+- #### Component limitations
+
+  Furthermore, the Cloud Posse component has some additional limitations. In particular:
+
+  - The controller and all runners and listeners share the Image Pull Secrets. You cannot use different ones for different
+    runners.
+  - All the runners use the same GitHub secret (app or PAT). Using a GitHub app is preferred anyway, and the single GitHub
+    app serves the entire organization.
+  - Only one controller is supported per cluster, though it can have multiple replicas.
+
+These limitations could be addressed if there is demand. Contact [Cloud Posse Professional Services](https://cloudposse.com/professional-services/) if you would be interested in sponsoring the development of any of these features.
+
+### Option 4: Philips Labs Runners (`philips-labs-github-runners`)
+
+If we are not deploying EKS, it's not worth the additional effort to set up Self-Hosted runners on EKS. Instead, we deploy Self-Hosted runners on EC2 instances. These are managed by an API Gateway and Lambda function that will automatically scale the number of runners based on the number of pending jobs in the queue. The queue is written to by the API Gateway from GitHub Events.
+
+For more on this option, see the [Philips Labs GitHub Runner](https://philips-labs.github.io/terraform-aws-github-runner/) documentation.
+
+### Option 5: Managed Runners
+
+There are a number of services that offer managed runners. These still have the advantage over GitHub Cloud-hosted runners, as they can be managed within your private VPCs.
+
+One option to consider is [runs-on.com](https://runs-on.com/), which is very inexpensive.
+
+## Recommendation
+
+At this time, Cloud Posse recommends the Actions Runner Controller on EKS (`eks/actions-runner-controller`) if you are using EKS, and the Philips Labs Runners (`philips-labs-github-runners`) if you are not using EKS.
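+
+Whichever option you choose, individual workflows opt in to the self-hosted pool through `runs-on` labels. A minimal sketch (the label is an assumption and must match however you configure your runner set):
+
+```yaml
+name: example
+on: pull_request
+jobs:
+  plan:
+    # Target the self-hosted runner pool instead of GitHub-hosted runners
+    runs-on: self-hosted
+    steps:
+      - uses: actions/checkout@v4
+      - run: echo "running on a self-hosted runner"
+```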
diff --git a/docs/layers/github-actions/design-decisions/decide-on-self-hosted-runner-placement.mdx b/docs/layers/github-actions/design-decisions/decide-on-self-hosted-runner-placement.mdx new file mode 100644 index 000000000..bec905b65 --- /dev/null +++ b/docs/layers/github-actions/design-decisions/decide-on-self-hosted-runner-placement.mdx @@ -0,0 +1,44 @@ +--- +title: "Decide on Self-Hosted Runner Placement" +sidebar_label: Runner Placement +description: Decide where to place self-hosted runners in your AWS organization +--- +import Intro from '@site/src/components/Intro'; + + +Self-hosted runners are custom runners that we use to run GitHub Actions workflows. We can use these runners to access resources in our private networks and reduce costs by using our own infrastructure. We need to decide where to place these runners in your AWS organization. + + +## Problem + +We need to decide where to place self-hosted runners in your AWS organization. + +We support multiple options for deploying self-hosted runners. We can deploy runners with EKS, Philips Labs, or with an ASG. For this decision, we will focus on the placement of the runners themselves. + +## Considered Options + +### Option 1: Deploy the runners in an `auto` account + +The first option is to deploy the controller in the `auto` (Automation) account. This account would be dedicated to automation tasks and would have access to all other accounts. We can use this account to deploy the controller and manage the runners in a centralized location. + +However, compliance is complicated because the `auto` cluster would have access to all environments. + +### Option 2: Deploy the runners in each account + +The second option is to deploy the controller in each account. This option sounds great from a compliance standpoint. Jobs running in each account are scoped to that account, each account has its own controller, and we can manage the runners independently. + +This might seem like a simplification from a compliance standpoint, but it creates complexity from an implementation standpoint. We would need to carefully consider the following: + +1. Scaling runners can inadvertently impact IP space available to production workloads +2. Many accounts do not have a VPC or EKS Cluster (for EKS/ARC solutions). So, we would need to decide how to manage those accounts. +3. We would need to manage the complexity of dynamically selecting the right runner pool when a workflow starts. While this might seem straightforward, it can get tricky in cases like promoting an ECR image from staging to production, where it’s not always clear-cut which runners should be used. + +## Recommendation + +_Option 1: Deploy the runners in an `auto` account_ + +We will deploy the runners in an `auto` account. This account will be connected to the private network and will have access to all other accounts where necessary. This will simplify the management of the runners and ensure that they are available when needed. + +## Consequences + +We will create an `auto` account and deploy the runners there. 
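+
+As a sketch, this means the runner components are imported only in the automation account's stacks, for example (the file path and catalog name below are illustrative assumptions):
+
+```yaml
+# stacks/orgs/acme/core/auto/us-east-1.yaml (hypothetical stack file)
+import:
+  # Runners are deployed only to the core-auto account, per this decision
+  - catalog/eks/actions-runner-controller
+```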
diff --git a/docs/layers/github-actions/design-decisions/design-decisions.mdx b/docs/layers/github-actions/design-decisions/design-decisions.mdx new file mode 100644 index 000000000..1acf7f33c --- /dev/null +++ b/docs/layers/github-actions/design-decisions/design-decisions.mdx @@ -0,0 +1,13 @@ +--- +title: Design Decisions +sidebar_label: Review Design Decisions +sidebar_position: 1 +--- +import DocCardList from '@theme/DocCardList'; +import Intro from '@site/src/components/Intro'; + + +Review the key design decisions of the GitHub Action Layer. These decisions relate to how you will manage self-hosted runners for your GitHub Action workflows. + + + diff --git a/docs/layers/github-actions/eks-github-actions-controller.mdx b/docs/layers/github-actions/eks-github-actions-controller.mdx index c991fb401..c366bbc88 100644 --- a/docs/layers/github-actions/eks-github-actions-controller.mdx +++ b/docs/layers/github-actions/eks-github-actions-controller.mdx @@ -5,6 +5,7 @@ sidebar_label: "Actions Runner Controller (EKS)" import Intro from '@site/src/components/Intro'; import KeyPoints from '@site/src/components/KeyPoints'; import Note from '@site/src/components/Note'; +import Steps from '@site/src/components/Steps'; import Step from '@site/src/components/Step'; import StepNumber from '@site/src/components/StepNumber'; import AtmosWorkflow from '@site/src/components/AtmosWorkflow'; @@ -41,149 +42,151 @@ In order to deploy Self-Hosted GitHub Runners on EKS, follow the steps outlined - Then we need to decide on the SSM path for the GitHub secret (Application private key) and GitHub webhook secret. - - ### GitHub Application Private Key + + + ### GitHub Application Private Key - Since the secret is automatically scoped by AWS to the account and region where the secret is stored, we recommend the - secret be stored at `/github/acme/github_token`. + Since the secret is automatically scoped by AWS to the account and region where the secret is stored, we recommend the + secret be stored at `/github/acme/github_token`. - `stacks/catalog/eks/actions-runner-controller.yaml`: + `stacks/catalog/eks/actions-runner-controller.yaml`: - ```yaml - ssm_github_secret_path: "/github_runners/controller_github_app_secret" - ``` + ```yaml + ssm_github_secret_path: "/github_runners/controller_github_app_secret" + ``` - The preferred way to authenticate is by _creating_ and _installing_ a GitHub App. This is the recommended approach as it - allows for more much more restricted access than using a personal access token, at least until - [fine-grained personal access token permissions](https://github.blog/2022-10-18-introducing-fine-grained-personal-access-tokens-for-github/) - are generally available. Follow the instructions - [here](https://github.com/actions/actions-runner-controller/blob/master/docs/authenticating-to-the-github-api.md) to - create and install the GitHub App. + The preferred way to authenticate is by _creating_ and _installing_ a GitHub App. This is the recommended approach as it + allows for more much more restricted access than using a personal access token, at least until + [fine-grained personal access token permissions](https://github.blog/2022-10-18-introducing-fine-grained-personal-access-tokens-for-github/) + are generally available. Follow the instructions + [here](https://github.com/actions/actions-runner-controller/blob/master/docs/authenticating-to-the-github-api.md) to + create and install the GitHub App. - At the creation stage, you will be asked to generate a private key. 
This is the private key that will be used to - authenticate the Action Runner Controller. Download the file and store the contents in SSM using the following command, - adjusting the profile and file name. The profile should be the `admin` role in the account to which you are deploying - the runner controller. The file name should be the name of the private key file you downloaded. + At the creation stage, you will be asked to generate a private key. This is the private key that will be used to + authenticate the Action Runner Controller. Download the file and store the contents in SSM using the following command, + adjusting the profile and file name. The profile should be the `admin` role in the account to which you are deploying + the runner controller. The file name should be the name of the private key file you downloaded. - ``` - AWS_PROFILE=acme-core-use1-auto-admin chamber write github_runners controller_github_app_secret -- "$(cat APP_NAME.DATE.private-key.pem)" - ``` + ``` + AWS_PROFILE=acme-core-use1-auto-admin chamber write github_runners controller_github_app_secret -- "$(cat APP_NAME.DATE.private-key.pem)" + ``` - You can verify the file was correctly written to SSM by matching the private key fingerprint reported by GitHub with: + You can verify the file was correctly written to SSM by matching the private key fingerprint reported by GitHub with: - ``` - AWS_PROFILE=acme-core-use1-auto-admin chamber read -q github_runners controller_github_app_secret | openssl rsa -in - -pubout -outform DER | openssl sha256 -binary | openssl base64 - ``` + ``` + AWS_PROFILE=acme-core-use1-auto-admin chamber read -q github_runners controller_github_app_secret | openssl rsa -in - -pubout -outform DER | openssl sha256 -binary | openssl base64 + ``` - At this stage, record the Application ID and the private key fingerprint in your secrets manager (e.g. 1Password). You - will need the Application ID to configure the runner controller, and want the fingerprint to verify the private key. + At this stage, record the Application ID and the private key fingerprint in your secrets manager (e.g. 1Password). You + will need the Application ID to configure the runner controller, and want the fingerprint to verify the private key. - Proceed to install the GitHub App in the organization or repository you want to use the runner controller for, and - record the Installation ID (the final numeric part of the URL, as explained in the instructions linked above) in your - secrets manager. You will need the Installation ID to configure the runner controller. + Proceed to install the GitHub App in the organization or repository you want to use the runner controller for, and + record the Installation ID (the final numeric part of the URL, as explained in the instructions linked above) in your + secrets manager. You will need the Installation ID to configure the runner controller. - In your stack configuration, set the following variables, making sure to quote the values so they are treated as - strings, not numbers. + In your stack configuration, set the following variables, making sure to quote the values so they are treated as + strings, not numbers. - ``` - github_app_id: "12345" - github_app_installation_id: "12345" - ``` - + ``` + github_app_id: "12345" + github_app_installation_id: "12345" + ``` + - - ### GitHub Webhook Secret Token + + ### GitHub Webhook Secret Token - If using the Webhook Driven autoscaling (recommended), generate a random string to use as the Secret when creating the - webhook in GitHub. 
+ If using the Webhook Driven autoscaling (recommended), generate a random string to use as the Secret when creating the + webhook in GitHub. - Generate the string using 1Password (no special characters, length 45) or by running + Generate the string using 1Password (no special characters, length 45) or by running - ```bash - dd if=/dev/random bs=1 count=33 2>/dev/null | base64 - ``` + ```bash + dd if=/dev/random bs=1 count=33 2>/dev/null | base64 + ``` - Store this key in AWS SSM under the same path specified by `ssm_github_webhook_secret_token_path` + Store this key in AWS SSM under the same path specified by `ssm_github_webhook_secret_token_path` - `stacks/catalog/eks/actions-runner-controller.yaml`: + `stacks/catalog/eks/actions-runner-controller.yaml`: - ```yaml - ssm_github_webhook_secret_token_path: "/github_runners/github_webhook_secret_token" - ``` - + ```yaml + ssm_github_webhook_secret_token_path: "/github_runners/github_webhook_secret_token" + ``` + -## Deploy + ## Deploy -Automation has an unique set of components from the `plat` clusters and therefore has its own Atmos Workflow. Notably, -`auto` includes the `eks/actions-runner-controller` component, which is used to create the `self-hosted` runners for the -GitHub Repository or Organization + Automation has an unique set of components from the `plat` clusters and therefore has its own Atmos Workflow. Notably, + `auto` includes the `eks/actions-runner-controller` component, which is used to create the `self-hosted` runners for the + GitHub Repository or Organization - - The first three steps before are all included in the following workflow: + + The first three steps before are all included in the following workflow: - - + + - - ### `iam-service-linked-roles` Component + + ### `iam-service-linked-roles` Component - At this point we assume that the `iam-service-linked-roles` component is already deployed for `core-auto`. If not, - deploy this component now with the following command: + At this point we assume that the `iam-service-linked-roles` component is already deployed for `core-auto`. If not, + deploy this component now with the following command: - ```bash - atmos terraform apply iam-service-linked-roles -s core-gbl-auto - ``` - + ```bash + atmos terraform apply iam-service-linked-roles -s core-gbl-auto + ``` + - - ### Deploy Automation Cluster and Resources + + ### Deploy Automation Cluster and Resources - Deploy the cluster with the same commands as `plat` cluster deployments: + Deploy the cluster with the same commands as `plat` cluster deployments: - - + + - Validate the `core-auto` deployment using Echo Server. For example: https://echo.use1.auto.core.acme-svc.com/ - + Validate the `core-auto` deployment using Echo Server. For example: https://echo.use1.auto.core.acme-svc.com/ + - - ### Deploy the Actions Runner Controller + + ### Deploy the Actions Runner Controller - Finally, deploy the `actions-runner-controller` component with the following command: + Finally, deploy the `actions-runner-controller` component with the following command: - ```bash - atmos terraform deploy eks/actions-runner-controller -s core-use1-auto - ``` + ```bash + atmos terraform deploy eks/actions-runner-controller -s core-use1-auto + ``` - + - - ### Using Webhook Driven Autoscaling (Click Ops) + + ### Using Webhook Driven Autoscaling (Click Ops) - To use the Webhook Driven autoscaling, you must also install the GitHub organization-level webhook after deploying the - component (specifically, the webhook server). 
The URL for the webhook is determined by the `webhook.hostname_template` - and where it is deployed. Recommended URL is - `https://gha-webhook.[environment].[stage].[tenant].[service-discovery-domain]`, which for this organization would be - `https://gha-webhook.use1.auto.core.acme-svc.com` + To use the Webhook Driven autoscaling, you must also install the GitHub organization-level webhook after deploying the + component (specifically, the webhook server). The URL for the webhook is determined by the `webhook.hostname_template` + and where it is deployed. Recommended URL is + `https://gha-webhook.[environment].[stage].[tenant].[service-discovery-domain]`, which for this organization would be + `https://gha-webhook.use1.auto.core.acme-svc.com` - As a GitHub organization admin, go to - `https://github.com/organizations/acme/settings/hooks`, and then: + As a GitHub organization admin, go to + `https://github.com/organizations/acme/settings/hooks`, and then: - - Click "Add webhook" and create a new webhook with the following settings: - - Payload URL: copy from Terraform output `webhook_payload_url` - - Content type: `application/json` - - Secret: whatever you configured in the secret above - - Which events would you like to trigger this webhook: - - Select "Let me select individual events" - - Uncheck everything ("Pushes" is likely the only thing already selected) - - Check "Workflow jobs" - - Ensure that "Active" is checked (should be checked by default) - - Click "Add webhook" at the bottom of the settings page + - Click "Add webhook" and create a new webhook with the following settings: + - Payload URL: copy from Terraform output `webhook_payload_url` + - Content type: `application/json` + - Secret: whatever you configured in the secret above + - Which events would you like to trigger this webhook: + - Select "Let me select individual events" + - Uncheck everything ("Pushes" is likely the only thing already selected) + - Check "Workflow jobs" + - Ensure that "Active" is checked (should be checked by default) + - Click "Add webhook" at the bottom of the settings page - After the webhook is created, select "edit" for the webhook and go to the "Recent Deliveries" tab and verify that there - is a delivery (of a "ping" event) with a green check mark. If not, verify all the settings and consult the logs of the - `actions-runner-controller-github-webhook-server` pod. - + After the webhook is created, select "edit" for the webhook and go to the "Recent Deliveries" tab and verify that there + is a delivery (of a "ping" event) with a green check mark. If not, verify all the settings and consult the logs of the + `actions-runner-controller-github-webhook-server` pod. + + # Related Topics diff --git a/docs/layers/github-actions/github-oidc-with-aws.mdx b/docs/layers/github-actions/github-oidc-with-aws.mdx index 4b6787357..4e20355c8 100644 --- a/docs/layers/github-actions/github-oidc-with-aws.mdx +++ b/docs/layers/github-actions/github-oidc-with-aws.mdx @@ -88,10 +88,18 @@ sequenceDiagram ### Deploy GitHub OIDC Provider Component After deploying the [GitHub OIDC Provider component](/components/library/aws/github-oidc-provider/) into an account, you should see the Identity Provider in IAM in the AWS Web Console. + + Deploy this component in each account where GitHub Actions need to assume a role. 
+ + + - Import `catalog/github-oidc-provider` in the `gbl` stack for the given account + - Deploy the `github-oidc-provider` component: `atmos terraform apply github-oidc-provider -s plat-gbl-dev` + + - ### Configure GitHub OIDC Mixin Role and Policy + ### Option 1: Configure GitHub OIDC Mixin Role and Policy Use the mixin to grant GitHub the ability to assume a role for a specific component. @@ -100,7 +108,7 @@ sequenceDiagram - ### Deploy GitHub OIDC Role Component + ### Option 2: Deploy GitHub OIDC Role Component Deploy the [GitHub OIDC Role component](/components/library/aws/github-oidc-role/) to create a generalized role for GitHub to access several resources in AWS. diff --git a/docs/layers/github-actions/philips-labs-github-runners.mdx b/docs/layers/github-actions/philips-labs-github-runners.mdx index 98446b43c..377c4ec45 100644 --- a/docs/layers/github-actions/philips-labs-github-runners.mdx +++ b/docs/layers/github-actions/philips-labs-github-runners.mdx @@ -2,9 +2,16 @@ title: Philips Labs GitHub Action Runners sidebar_label: Philips Labs Action Runners --- -import Intro from '@site/src/components/Intro'; -import KeyPoints from '@site/src/components/KeyPoints'; -import AtmosWorkflow from '@site/src/components/AtmosWorkflow'; + +import Intro from "@site/src/components/Intro"; +import KeyPoints from "@site/src/components/KeyPoints"; +import AtmosWorkflow from "@site/src/components/AtmosWorkflow"; +import Steps from "@site/src/components/Steps"; +import Step from "@site/src/components/Step"; +import StepNumber from "@site/src/components/StepNumber"; +import TaskList from "@site/src/components/TaskList"; +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; If we are not deploying EKS, it's not worth the additional effort to set up Self-Hosted runners on EKS. Instead, we @@ -15,13 +22,13 @@ import AtmosWorkflow from '@site/src/components/AtmosWorkflow'; ## Quick Start -| Steps | Actions | Calling Workflow | -| :------------------------------------------- | :------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | -| 1. Generate GitHub App Private Key | Set SSM Param `"/pl-github-runners/key"` to App private Key base64 encoded | `atmos workflow deploy/pl-github-runners -f github` | -| 2. Note GitHub ID | Set SSM Param `"/pl-github-runners/id"` to the GitHub App ID | `atmos workflow deploy/pl-github-runners -f github` | -| 3. Deploy GitHub OIDC Provider | Deploy GitHub OIDC to every needed account | `atmos workflow deploy/github-oidc-provider -f github` | -| 4. Deploy GitHub Runners | Deploy the GitHub runners | `atmos workflow deploy/pl-github-runners -f github` | -| 5. Update Webhook (if changed or redeployed) | Update the GitHub App Webhook | (if `enable_update_github_app_webhook: true`) `atmos workflow deploy/pl-github-runners -f github`. Otherwise Manual | +| Steps | Actions | +| :------------------------------------------------- | :-------------------------------------------------------------------------------------- | +| 1. Create GitHub App | ClickOps | +| 2. Upload GitHub App ID and Private Key to AWS SSM | Set SSM Param `"/pl-github-runners/id"` and `"/pl-github-runners/key"` (base64 encoded) | +| 3. Deploy GitHub OIDC Provider | Deploy GitHub OIDC to every needed account | +| 4. Deploy GitHub Runners | `atmos terraform deploy philips-labs-github-runners -s core-use1-auto` | +| 5. 
Update Webhook (if changed or redeployed) | ClickOps | ## Deploy @@ -36,130 +43,132 @@ Follow the guide with the upstream module, [philips-labs/terraform-aws-github-runner](https://github.com/philips-labs/terraform-aws-github-runner#setup-github-app-part-1), or follow the steps below. -### Create the GitHub App - -:::info Customer Requirement - -This step requires access to the GitHub Organization. Customers will need to create this GitHub App in Jumpstart -engagements. + + + ### Vendor Components -::: + Vendor in the necessary components with the following workflow: -1. Create a new GitHub App -1. Choose a name -1. Choose a website (mandatory, not required for the module). -1. Disable the webhook for now (we will configure this later or create an alternative webhook). -1. Add the following permission: + -```diff -# Required Permissions for Repository Runners: -## Permissions for all runners: -### Repository: -+ Actions: Read-only (check for queued jobs) -+ Checks: Read-only (receive events for new builds) -+ Metadata: Read-only (default/required) + -## Permissions for repository-level runners only: -### Repository: -+ Administration: Read & write (to register runner) + + ### Create the GitHub App -## Permissions for organization-level runners only: -### Organization -+ Self-hosted runners: Read & write (to register runner) -``` + :::info Customer Requirement -6. Generate a Private Key + This step requires access to the GitHub Organization. Customers will need to create this GitHub App in Jumpstart + engagements. -If you are working with Cloud Posse, upload this Private Key and GitHub App ID to 1Password and inform Cloud Posse. -Otherwise, continue with the component deployment in `core-use1-auto`. + ::: -### Deploy the `philips-labs-github-runner` Component + + 1. Create a new GitHub App + 1. Choose a name + 1. Choose a website (mandatory, not required for the module). + 1. Disable the webhook for now (we will configure this later or create an alternative webhook). + 1. Add the following permission for your chosen runner scope: -:::tip + + + #### Repository Permissions + + - Actions: Read-only (check for queued jobs) + - Checks: Read-only (receive events for new builds) + - Metadata: Read-only (default/required) + - Administration: Read & write (to register runner) + + -This step does _not_ require access to the GitHub Organization. Cloud Posse will run this deployment for Jumpstart -engagements. + + #### Repository Permissions + + - Actions: Read-only (check for queued jobs) + - Checks: Read-only (receive events for new builds) + - Metadata: Read-only (default/required) + + #### Organization Permissions + + - Self-hosted runners: Read & write (to register runner) + + + -::: + 1. Generate a Private Key + 1. If you are working with Cloud Posse, upload this Private Key and GitHub App ID to 1Password and inform Cloud Posse. Otherwise, continue to the next step. + -Run the `deploy/pl-github-runners` workflow with `atmos` to write the GitHub App information to the `core-use1-auto` -SSM account and deploy the component. + - + + ### Upload AWS SSM Parameters -This is the same as the following steps: + :::tip -1. Upload the PEM file key to the specified ssm path, `/pl-github-runners/key`, in `core-use1-auto` as a base64 - encoded string. -2. Upload the GitHub App ID to the specified ssm path, `/pl-github-runners/id`, in `core-use1-auto`. -3. Deploy the `philips-labs-github-runners` component to `core-use1-auto`. 
Run this with the following: + This step does _not_ require access to the GitHub Organization. Cloud Posse will run this deployment for Jumpstart + engagements. -```console -atmos terraform apply philips-labs-github-runners -s core-use1-auto -``` + ::: -Once the component is deployed, save the webhook URL and secret in 1Password. The endpoint can be found with the -following: + Now that the GitHub App has been created, upload the Private Key and GitHub App ID to AWS SSM Parameter Store in `core-use1-auto` (or your chosen region). -```console -atmos terraform output philips-labs-github-runners -s core-use1-auto 'webhook' -``` + + 1. Upload the PEM file key to the specified ssm path, `/pl-github-runners/key`, in `core-use1-auto` as a base64 encoded string. + 2. Upload the GitHub App ID to the specified ssm path, `/pl-github-runners/id`, in `core-use1-auto`. + -### Add the Webhook to the GitHub App + Or run the `upload/pl-secrets` workflow with `atmos` to write the GitHub App information to the `core-use1-auto` SSM account and deploy the component. -:::info Customer Requirement + -This step requires access to the GitHub Organization. Customers will need to finalize the GitHub App in Jumpstart -engagements. + -::: + + ### Deploy GitHub OIDC Providers -Now that the component is deployed and the webhook has been created, add that webhook to the GitHub App. Both the -webhook URL and secret should now be stored in 1Password. If not, you can retrieve these values from the output of the -`philips-labs-github-runners` component in `core-use1-auto` as described in the previous step. + First deploy the GitHub OIDC provider to all accounts where we want to grant GitHub access. The typical list of accounts + is included with the `deploy/github-oidc-provider` workflow; run the following with `SuperAdmin`: -1. Open the GitHub App created in - [Create the GitHub App above](/layers/github-actions/philips-labs-github-runners/#create-the-github-app) -1. Enable the webhook. -1. Provide the webhook url, should be part of the output of terraform. -1. Provide the webhook secret (`terraform output -raw `). -1. In the _"Permissions & Events"_ section and then _"Subscribe to Events"_ subsection, check _"Workflow Job"_. + -1. Ensure the webhook for the GitHub app is enabled and pointing to the output of the module. - - The endpoint can be found from `atmos terraform output philips-labs-github-runners -s core-use1-auto 'webhook'` - -## Vendor - -Vendor in the necessary components with the following workflow: - - - -## Deploy + -:::info + + ### Deploy the Philips Labs GitHub Runners -You can run all workflows at once in the right order with + Now that the GitHub App has been created and the SSM parameters have been uploaded, deploy the `philips-labs-github-runners` component. - + -::: + -### GitHub OIDC Provider + + ### Add the Webhook to the GitHub App -First deploy the GitHub OIDC provider to all accounts where we want to grant GitHub access. The typical list of accounts -is included with the `deploy/github-oidc-provider` workflow; run the following with `SuperAdmin`: + :::info Customer Requirement - + This step requires access to the GitHub Organization. Customers will need to finalize the GitHub App in Jumpstart + engagements. -### GitHub Runners + ::: -To deploy the self-hosted runners themselves, first verify [the GitHub App requirement](#Requirements) is complete -including both SSM parameters. Next we will deploy the token rotator and then the runners themselves. 
+ Now that the component is deployed and the webhook has been created, add that webhook to the GitHub App. Both the + webhook URL and secret should now be stored in 1Password. If not, you can retrieve these values from the output of the + `philips-labs-github-runners` component in `core-use1-auto` as described in the previous step. - + + 1. Open the GitHub App created in + [Create the GitHub App above](/layers/github-actions/philips-labs-github-runners/#create-the-github-app) + 1. Enable the webhook. + 1. Provide the webhook URL, which should be part of the Terraform output. + 1. Provide the webhook secret (`terraform output -raw `). + 1. In the _"Permissions & Events"_ section and then _"Subscribe to Events"_ subsection, check _"Workflow Job"_. + 1. Ensure the webhook for the GitHub app is enabled and pointing to the output of the module. The endpoint can be found by running `atmos terraform output philips-labs-github-runners -s core-use1-auto 'webhook'` + - + + ## Usage @@ -189,9 +198,9 @@ Remove the `terraform` label from the default runner set and add the `terraform` Since the workflows are all labeled with `terraform` already, they will automatically select the new runner set on their next run. -# FAQ +## FAQ -## I cannot assume the role from GitHub Actions after deploying +### I cannot assume the role from GitHub Actions after deploying The following error is very common if the GitHub workflow is missing proper permission. diff --git a/docs/layers/identity/aws-saml.mdx b/docs/layers/identity/aws-saml.mdx index 77ef620c6..5be6d399a 100644 --- a/docs/layers/identity/aws-saml.mdx +++ b/docs/layers/identity/aws-saml.mdx @@ -30,14 +30,24 @@ identity. You can use this federated identity to connect directly to a given AWS Here are some example setup references: - Follow the [AWS documentation for Google Workspace](https://aws.amazon.com/blogs/desktop-and-application-streaming/setting-up-g-suite-saml-2-0-federation-with-amazon-appstream-2-0/). Once you have completed the setup, download the metadata file. + + 1. Open the [AWS documentation for GSuite](https://aws.amazon.com/blogs/desktop-and-application-streaming/setting-up-g-suite-saml-2-0-federation-with-amazon-appstream-2-0/) + 1. Follow Steps 1 through 7. This document refers to Appstream, but the process will be the same for AWS. + 1. Once you have completed the setup, download the metadata file. + - - Follow the [Okta documentation](https://help.okta.com/en-us/Content/Topics/DeploymentGuides/AWS/aws-configure-identity-provider.htm). Once you have completed the setup, download the metadata file. + + + 1. Create an "Amazon Web Services Account Federation" application in Okta. + 1. Select "SAML 2.0" from the Sign-On Method. + 1. View and download the identity provider (IdP) metadata file. + + + For details, please see the official [Okta documentation](https://help.okta.com/en-us/Content/Topics/DeploymentGuides/AWS/aws-configure-identity-provider.htm) - + Follow the [JumpCloud documentation](https://support.jumpcloud.com/support/s/article/getting-started-applications-saml-sso2). Once you have completed the setup, download the metadata file. @@ -51,18 +61,52 @@ identity. You can use this federated identity to connect directly to a given AWS ## Import the metadata file from the chosen provider. + Download and save the metadata file within the `aws-saml` component directory. + 1.
Place this file inside the `aws-saml` component directory (`components/terraform/aws-saml/`) - 2. Commit this to version control. The filename should match the variable configured in `stacks/catalog/aws-saml.yaml`. + 1. The filename should match the variable configured in the `aws-saml` stack catalog (`stacks/catalog/aws-saml.yaml`). + 1. Commit this to version control. + + + + + Make sure the `var.saml_providers` map key ends with `-okta`. We filter by this suffix to determine whether or not to set up a dedicated user for Okta. This is only necessary for Okta. + + ```yaml + saml_providers: + acme-okta: "OktaIDPMetadata-acme.com.xml" + ``` + + ## Deploy the SAML Integration + Deploy the `aws-saml` component to your Identity account. + ```bash atmos terraform apply aws-saml -s core-gbl-identity ``` + + + + + ## Complete the Identity Provider (IdP) setup + + If necessary, complete the integration setup in your chosen IdP. This will vary depending on the provider. + + + + + Follow the steps in the [official Okta documentation](https://help.okta.com/en-us/content/topics/deploymentguides/aws/aws-configure-aws-app.htm) to complete the setup. Please review the following tips, as we've encountered these issues in the past: + + - Deploying the `aws-saml` component will create an AWS IAM User, which Okta will use to discover roles in AWS. This user's access key and secret key are stored in AWS SSM Parameter Store in the same account and (default) region as the `aws-saml` component. This is unique to Okta. + - In the "Provisioning" tab for the integration in Okta, you must check the **"Update User Attributes"** box. This does not appear in the documentation but is necessary for the roles to populate in Okta. + + diff --git a/docs/layers/identity/centralized-terraform-access.mdx b/docs/layers/identity/centralized-terraform-access.mdx index 9ae7ab1aa..2da3e3535 100644 --- a/docs/layers/identity/centralized-terraform-access.mdx +++ b/docs/layers/identity/centralized-terraform-access.mdx @@ -115,20 +115,20 @@ With “automatic provisioning” in AWS, we can synchronize all users and group ```mermaid flowchart LR subgraph external_idp["External Identity Provider"] - user1["User 1"] - user2["User 2"] - user3["User 3"] - group1["Group 1"] + user1["IdP User: Homer"] + user2["IdP User: Lisa"] + user3["IdP User: Bart"] + group1["IdP Group: DevOps"] user1 --> group1 user2 --> group1 user3 --> group1 end subgraph aws_iam_identity_center["AWS IAM Identity Center"] - user1_copy["User 1 Copy"] - user2_copy["User 2 Copy"] - user3_copy["User 3 Copy"] - group1_copy["Group 1 Copy"] + user1_copy["Identity Center User: Homer"] + user2_copy["Identity Center User: Lisa"] + user3_copy["Identity Center User: Bart"] + group1_copy["Identity Center Group: DevOps"] permissions["Permission Sets"] user1_copy -.-> group1_copy user2_copy -.-> group1_copy @@ -210,14 +210,19 @@ flowchart TB ### Why use AWS SAML? -The `aws-saml` component provides SAML access for Admin users to connect to the Identity account admin role `aws-teams` -without AWS IAM Identity Center (Successor to AWS Single Sign-On). +SAML offers granular control over identity management with your IdP in AWS. With our `aws-saml` component, you can integrate multiple IdPs, and we explicitly provision every role, policy, and trust relationship needed. The `aws-saml` component allows users to assume any number of roles to access AWS accounts that are associated with their IdP groups.
This component creates an Identity Provider (IdP) in the Identity account to allow federated access to an identity role. Follow the Identity Providers documentation for adding a SAML login. With AWS SAML, we create a federated SAML login that connects to the "team" in the identity account, and then users can assume other roles from there. We use the [AWS Extend Switch Roles plugin](https://github.com/tilfinltd/aws-extend-switch-roles) that makes this much easier, but it's not as intuitive as Identity Center. +:::info What is a federated login? + +A federated login means that instead of managing separate credentials for each AWS account, users authenticate through a centralized identity provider (IdP). This allows them to access multiple AWS accounts or services using a single set of credentials, based on trust relationships established between the IdP and AWS. + +::: + ```mermaid flowchart TB subgraph "Human Access Architecture with AWS SAML" @@ -261,7 +266,9 @@ flowchart TB ### How do I use AWS IAM Identity Center (SSO)? -The `aws-sso` component connects AWS IAM Identity Center (Successor to AWS Single Sign-On) Groups to Permission Sets. +AWS IAM Identity Center (SSO) is natively integrated with the AWS Web Console and the AWS CLI. You don't need to do anything special to get started with it once it's been properly configured. + +Specifically, the `aws-sso` component connects AWS IAM Identity Center (Successor to AWS Single Sign-On) Groups to Permission Sets. Permission Sets grant access to `aws-teams` in the Identity account or (optional) access to an individual account for convenience. diff --git a/docs/layers/identity/design-decisions/decide-on-aws-cli-login.mdx b/docs/layers/identity/design-decisions/decide-on-aws-cli-login.mdx new file mode 100644 index 000000000..a16607874 --- /dev/null +++ b/docs/layers/identity/design-decisions/decide-on-aws-cli-login.mdx @@ -0,0 +1,62 @@ +--- +title: "Decide on AWS CLI Login" +sidebar_label: "AWS CLI Login" +description: Decide on a CLI tool that enables AWS login and credentials via SAML IDP for CLI and web console access. +--- +import Intro from '@site/src/components/Intro'; +import KeyPoints from '@site/src/components/KeyPoints'; +import Note from '@site/src/components/Note'; +import TaskList from '@site/src/components/TaskList'; + + +Decide on a CLI tool that enables AWS login and credentials via SAML IDP for CLI and web console access. + + +## Problem + +Users need some way to log in to AWS when using the CLI or applying Terraform changes. We have AWS Identity Center or AWS SAML set up for an AWS organization, but we need a way to log in to AWS locally. + +There are a number of tools that can help with this, but we need to decide on one. + +### Option 1: Use the AWS CLI + +First of all, we could use the AWS CLI to log in to AWS. This is the most basic way to log in, but it requires a lot of manual steps and is not very user-friendly. + +### Option 2: Use Leapp + +Alternatively, we could use Leapp by Noovolari. This is a tool that allows you to log in to AWS using SAML and then automatically generates temporary credentials for you to use in the CLI. Once set up, Leapp makes it very easy to log in to AWS and use the CLI, assume roles across your accounts with Role Chaining pre-configured, and even launch directly into the AWS web console.
+ +> [!IMPORTANT] +> Leapp has been a popular choice for this use case, but with Noovolari announcing the shutdown of their paid service, this could raise concerns about the long-term viability of the project. While the [Leapp](https://github.com/Noovolari/leapp) project will continue to be supported, the discontinuation of the paid option might make it less appealing to future users. + +Leapp requires several manual steps during the initial setup, which have been a pain point for some users. See [How to Login to AWS (with Leapp)](/layers/identity/how-to-log-into-aws/) for more on the required setup and usage. + +Leapp requires setup steps outside of our Geodesic containers, which makes it less convenient for users who primarily work in the shell and increases the likelihood of configuration errors. + +### Option 3: Use `aws-sso-cli` (AWS SSO Only) + +The most recent option we've come across is [aws-sso-cli](https://github.com/synfinatic/aws-sso-cli). This is a CLI tool that allows you to log in to AWS using AWS SSO and then automatically generates temporary credentials for you to use in the CLI. It is similar to Leapp, and is also open source and free to use. It also has a number of features that make it easier to use, such as the ability to log in to multiple AWS accounts and roles at the same time. + +One potential benefit of `aws-sso-cli` is that it is a CLI tool, which means it could likely be integrated into our Geodesic containers. This would make it easier for users to log in to AWS and use the CLI, and would reduce the risk of user configuration errors. + +However, `aws-sso-cli` is designed specifically for AWS SSO, which means it may not be suitable for users who are using AWS SAML. + +### Option 4: Use `saml2aws` (AWS SAML Only) + +Another option is to use `saml2aws`, which is a CLI tool that allows you to log in to AWS using SAML. It is similar to Leapp and `aws-sso-cli`, but is specifically designed for AWS SAML. This means it may not be suitable for users who are using AWS SSO. + +Most IdPs supported by `saml2aws`, with the exception of Okta, depend on screen scraping for SAML logins, which is far from ideal. This approach can lead to issues, especially with services like GSuite that use bot protection, which occasionally disrupts users attempting to log in. Additionally, SAML providers differ in how they handle login processes and multi-factor authentication (MFA), meaning you may need to make specific adjustments to ensure smooth integration with your identity provider. + +If your organization uses Okta, then `saml2aws` is a good option. + +### Option 5: Use a browser plugin + +Another option is to use a browser plugin, such as [aws-extend-switch-roles](https://github.com/tilfinltd/aws-extend-switch-roles), that allows you to log in to AWS using SAML. This is a simple and user-friendly way to log in to AWS, but it requires you to use a browser and is not suitable for users who are working in the CLI. + +### Option 6: Use a custom solution + +Finally, we could build our own custom solution for logging into AWS. This would give us complete control over the process, but would require a lot of development effort and ongoing maintenance. + +## Recommendation + +Cloud Posse continues to recommend Leapp for now, but we are evaluating alternatives.
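+
+For a sense of what the CLI-driven options look like in practice, here is a rough sketch of the `saml2aws` flow from Option 4. The IdP URL, username, and profile name below are hypothetical placeholders, and the exact prompts vary by provider:
+
+```bash
+# One-time setup: register your IdP (hypothetical Okta app URL shown)
+saml2aws configure \
+  --idp-provider Okta \
+  --url https://acme.okta.com/home/amazon_aws/abc123/272 \
+  --username homer@acme.com
+
+# Log in and write temporary credentials for the CLI to use
+saml2aws login --profile acme-identity
+```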
diff --git a/docs/layers/identity/identity.mdx b/docs/layers/identity/identity.mdx index b4739bf8e..3ae4c3a43 100644 --- a/docs/layers/identity/identity.mdx +++ b/docs/layers/identity/identity.mdx @@ -45,7 +45,7 @@ Let’s start by identifying the minimum requirements for an identity and authen Plus, all users and permissions must be centrally managed and integrated into an identity platform. We don’t want to copy and paste permissions across accounts or rely on any manual processes - ### Highly precise control of user groups + ### Tight control over user groups Next, we need fine grained access control for user groups. Then assign users to one or more groups depending on what they need access to. It needs to be easy to understand for both users and administrators. @@ -94,9 +94,7 @@ Now you may be asking some questions. There are plenty of existing solutions out
-AWS doesn’t make it easy to log in. Control Tower lacked APIs, until recently. Identity Center doesn’t work with automation. And IAM roles with SAML is cumbersome to use. - -It’s gotten a lot better with Identity Center, which is one of the reasons we recommend it, but it still doesn’t solve the problem of how to distribute configurations to developers to easily log in on their workstations or how to use IAM roles with SAML. +Ultimately, AWS does not provide a single solution that meets all our requirements. We need to combine the best of both worlds. Identity Center is great for human access, but it doesn’t work well for machines. On the other hand, AWS SAML is great for machines but is cumbersome for users to navigate the AWS web console without a third party tool. ## Our Solution @@ -118,19 +116,19 @@ Users can sign into Identity Center to access any account they have access to wi ```mermaid flowchart LR - subgraph identity_teams["identity Teams"] + subgraph identity_teams["Identity Teams"] devops["devops"] end - subgraph dev_team_roles["dev Team Roles"] + subgraph dev_team_roles["Dev Team Roles"] terraform_dev["terraform"] end - subgraph staging_team_roles["staging Team Roles"] + subgraph staging_team_roles["Staging Team Roles"] terraform_staging["terraform"] end - subgraph prod_team_roles["prod Team Roles"] + subgraph prod_team_roles["Prod Team Roles"] terraform_prod["terraform"] end @@ -149,18 +147,24 @@ We’ll explain in detail in the following pages. Then to make it incredibly easy for users to log in, we recommend using Leapp to manage local AWS sessions. Leap supports all Identity Providers and can automatically refresh credentials when they expire. -Plus, it’s open source and free to use. It also offers paid options that make it easier for administrators to share configurations. Please note, the Cloud Posse solution does not require any subscription. +Plus, it’s open source and free to use. ### Pre-Configured for your Team -Finally, to get everyone up and running on your Team, we pre-configured a single, common AWS config for all users. +Finally, to get everyone up and running on your Team, we preconfigured a single, common AWS config for all users. + +All users connect to AWS using the same AWS profile name. Then we can use the same AWS config to assume any other role from that given AWS profile if their team has permission. This way, users can quickly apply Terraform across accounts and assume roles directly in the accounts to which they have access. + +:::tip Using Dynamic Terraform Roles + +With dynamic Terraform roles, we can choose to deploy a `terraform` and a `planner` role into any given account. The `terraform` role is used to apply changes, and the `planner` role is used to plan changes. For more on dynamic Terraform roles, see the [Dynamic Terraform Roles](/layers/identity/docs/dynamic-terraform-roles/) documentation. -We preinstall the AWS config in the Geodesic toolbox, and users will automatically assume the correct access roles in AWS. Users can quickly apply Terraform across accounts and assume roles directly in the accounts they have access. +::: ## Next Steps - Next, we'll explain how we've implemented this solution in details. + Next, we'll explain how we've implemented this solution in detail. If you're curious about the thought that went into this process, please review the design decisions documentation.
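+
+As a rough illustration of the shared-profile pattern described above (the profile names here are hypothetical; yours come from the generated AWS config):
+
+```bash
+# Everyone authenticates to the same, common profile name
+aws sts get-caller-identity --profile acme-identity
+
+# Other profiles chain from it via role assumption defined in the shared
+# AWS config, so switching accounts is just a matter of naming a profile
+aws sts get-caller-identity --profile acme-plat-gbl-dev-terraform
+```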
diff --git a/docs/layers/monitoring/datadog/datadog.mdx b/docs/layers/monitoring/datadog/datadog.mdx index 7aaa5ebe4..d0abf3fb7 100644 --- a/docs/layers/monitoring/datadog/datadog.mdx +++ b/docs/layers/monitoring/datadog/datadog.mdx @@ -4,10 +4,13 @@ sidebar_label: "Datadog" sidebar_position: 10 description: Monitor everything with Datadog --- +import Intro from '@site/src/components/Intro'; import Steps from '@site/src/components/Steps'; import Admonition from '@theme/Admonition'; -Cloud Posse advises customers that all have very similar monitoring requirements because we filter for customers who match our delivery model. The actual fine-tuning of it will be specific to the customer. + + This outlines Cloud Posse's practical approach to monitoring with Datadog, focusing on defining reusable Service Level Indicators (SLIs) and Service Level Objectives (SLOs) for consistent implementation across customer environments. It aims to help businesses streamline monitoring and incident management by aligning technical performance with business goals. + Our goal with this document is to identify the reusable, standard SLI/SLO for our customers that we can readily implement time and time again. @@ -91,6 +94,7 @@ Golden Signals are a form of telemetry that applies to anything with throughput. These golden signals are closely related to the [RED metrics](https://www.weave.works/blog/the-red-method-key-metrics-for-microservices-architecture/) for microservices: rate, errors, and duration, and the older [USE method](https://www.weave.works/blog/the-red-method-key-metrics-for-microservices-architecture/) focusing on utilization, saturation, and errors. These signals are used to calculate the service level objectives (SLOs). ### Other Signals + Here are other useful metrics that are examples that don’t necessarily fit into the Golden Signals. #### Pull Requests @@ -115,6 +119,7 @@ Here are other useful metrics that are examples that don’t necessarily fit int
The number of pull requests merged by admins without the usual approval process. This metric highlights instances where standard review procedures were skipped.
+ #### Code Quality
diff --git a/docs/layers/monitoring/grafana/setup.mdx b/docs/layers/monitoring/grafana/setup.mdx index fff6d1a21..40afd75e6 100644 --- a/docs/layers/monitoring/grafana/setup.mdx +++ b/docs/layers/monitoring/grafana/setup.mdx @@ -21,7 +21,7 @@ import AtmosWorkflow from '@site/src/components/AtmosWorkflow'; - ### Vendor Components + ## Vendor Components Vendor all required components diff --git a/docs/layers/project/create-repository.mdx b/docs/layers/project/create-repository.mdx index 9889f2c0c..19d80ec1a 100644 --- a/docs/layers/project/create-repository.mdx +++ b/docs/layers/project/create-repository.mdx @@ -69,7 +69,7 @@ Learn how to create a GitHub repository to host infrastructure tools and configu With the GitHub repository prepared, we are now ready to import the Cloud Posse reference architecture. - The contents of this respository are supplied as part of our [Quickstart](/quickstart) or [Jumpstart](/jumpstart) packages. For the remainder of this guide, we will assume you have access to the reference architecture configurations. + The contents of this repository are supplied as part of our [Quickstart](/quickstart) or [Jumpstart](/jumpstart) packages. For the remainder of this guide, we will assume you have access to the reference architecture configurations. Learn More diff --git a/docs/layers/project/design-decisions/decide-on-secrets-management-placement.mdx b/docs/layers/project/design-decisions/decide-on-secrets-management-placement.mdx new file mode 100644 index 000000000..454e69f70 --- /dev/null +++ b/docs/layers/project/design-decisions/decide-on-secrets-management-placement.mdx @@ -0,0 +1,55 @@ +--- +title: "Decide on Secrets Placement for Terraform" +sidebar_label: "Secrets Placement" +sidebar_position: 5 +refarch_id: REFARCH-81 +description: "Decide where to store secrets used by Terraform" +--- +import Intro from '@site/src/components/Intro'; + + +We need to decide where to store secrets used by Terraform. We have two options: store secrets in each account or store them in a centralized account. + + +## Context + +Often we need to integrate with third-party services or internal services that require API keys or other secrets. We need to decide where to store these secrets so that Terraform can access them. There are two reasonable options for storing secrets in our AWS account architecture. We need to decide which one to use. + +### Option 1: Store Secrets in each account + +The first option is to store the credential in the same account as the resource. For example, API keys scoped to `dev` would live in `plat-dev`. + +#### Pros + +- Accounts can easily access their given credentials +- IAM level boundaries are enforced between accounts + +#### Cons + +- Secret administrators need to access many accounts to create those secrets +- There is no centralized management for all secrets out there + +### Option 2: Store Credentials in a Centralized Account + +The second option is to store the credentials in a centralized account, such as `corp` or `auto`. Now you would need to share those credentials with each account, for example with [AWS RAM](https://aws.amazon.com/ram/). + +#### Pros + +- Centralized secrets management +- Secret administrators have a single place to manage secrets +- Once shared, resources in a given account still access their given secrets from their own account. 
They do not need to reach out to another account. + +#### Cons + +- Complexity with AWS RAM +- Secret administrators must be careful to share secrets with the correct accounts +- You need to decide what account to use as the centralized management account. We could deploy `corp` for this if you'd like, or reuse `auto`. + +## Decision + +We will use AWS SSM Parameter Store for all platform-level secrets used by `infrastructure` and `terraform`. + +## Related + +- [Decide on Secrets Strategy for Terraform](/layers/project/design-decisions/decide-on-secrets-management-strategy-for-terraform/) + diff --git a/docs/layers/project/design-decisions/decide-on-secrets-management-strategy-for-terraform.mdx b/docs/layers/project/design-decisions/decide-on-secrets-management-strategy-for-terraform.mdx index a39fb5643..1d5894ca1 100644 --- a/docs/layers/project/design-decisions/decide-on-secrets-management-strategy-for-terraform.mdx +++ b/docs/layers/project/design-decisions/decide-on-secrets-management-strategy-for-terraform.mdx @@ -3,13 +3,13 @@ title: "Decide on Secrets Management Strategy for Terraform" sidebar_label: "Secrets Management" sidebar_position: 5 refarch_id: REFARCH-81 -description: "Decide where to manage secrets used by Terraform" +description: "Decide how to manage secrets used by Terraform" --- import Intro from '@site/src/components/Intro'; import KeyPoints from '@site/src/components/KeyPoints'; -Deciding where to store secrets is crucial for securing both platform integration and application data when using Terraform. The appropriate secret store depends on the stack layer and must account for situations where other infrastructure might not yet be in place (e.g. Vault, Kubernetes, etc). +Deciding how to store secrets is crucial for securing both platform integration and application data when using Terraform. The appropriate secret store depends on the stack layer and must account for situations where other infrastructure might not yet be in place (e.g. Vault, Kubernetes, etc). We need to decide where secrets will be kept. We’ll need to be able to securely store platform integration secrets (e.g. master keys for RDS, HashiCorp Vault unseal keys, etc) as well as application secrets (any secure customer data). @@ -20,10 +20,13 @@ like ASM/SSM is required. - e.g. Vault deployed as helm chart in each tenant environment using KMS keys for automatic unsealing (this chart already exists) -- SSM Parameter Store + KMS for all platform-level secrets used by `infrastructure` and `terraform` +- SSM Parameter Store + KMS for all platform-level secrets used by `infrastructure` and Terraform -- AWS Secrets Manager supports automatic key rotation which almost nothing other than RDS supports and requires - applications to be modified in order to use it to the full extent. +- AWS Secrets Manager supports automatic key rotation, which almost nothing other than RDS supports, and requires applications to be modified in order to use it to the full extent. + +## Recommendation + +We will use AWS SSM Parameter Store for all platform-level secrets used by `infrastructure` and Terraform.
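+
+As a minimal sketch of what this looks like in practice (the parameter path below is a hypothetical example; follow your own naming conventions):
+
+```bash
+# Write a platform-level secret (hypothetical path shown)
+aws ssm put-parameter \
+  --name /datadog/datadog_api_key \
+  --type SecureString \
+  --value "$DD_API_KEY"
+
+# Read it back from the CLI or automation
+aws ssm get-parameter \
+  --name /datadog/datadog_api_key \
+  --with-decryption \
+  --query Parameter.Value \
+  --output text
+```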
## Related diff --git a/docs/layers/project/toolbox.mdx b/docs/layers/project/toolbox.mdx index 1ed116369..e2b0bdbf1 100644 --- a/docs/layers/project/toolbox.mdx +++ b/docs/layers/project/toolbox.mdx @@ -8,6 +8,10 @@ import Intro from '@site/src/components/Intro'; import ActionCard from '@site/src/components/ActionCard'; import PrimaryCTA from '@site/src/components/PrimaryCTA'; import DismissibleDialog from '@site/src/components/DismissibleDialog'; +import CodeBlock from '@theme/CodeBlock'; +import CollapsibleText from '@site/src/components/CollapsibleText'; +import PartialDockerfile from '@site/examples/snippets/components/docker/infra-acme/Dockerfile'; +import PartialMakefile from '@site/examples/snippets/Makefile'; import Note from '@site/src/components/Note'; @@ -27,17 +31,23 @@ Geodesic is a powerful Linux toolbox container designed to optimize DevOps workf

Where are the configs?

-

The configurations are availble via our Quickstart

+

The configurations are available via our Quickstart

Try Quickstart
## Building the Toolbox Image -Build the Geodesic infrastructure container. This is a container that has all the tools for building the app. It's built -from the `Dockerfile` using a `Makefile`. +Build a Geodesic infrastructure container. This container has all the tools, like Terraform and Atmos, for building infrastructure. It's built from the `Dockerfile`, and there are some predefined targets defined in the `Makefile` to make this easy. Customize these for your organization. Here are examples of both for reference. -The standard `Makefile` includes a number of commands. In order to build the initial, complete Geodesic image, run the -following: + + {PartialDockerfile} + + + + {PartialMakefile} + + +The standard `Makefile` includes a number of commands. In order to build the initial, complete Geodesic image, run the following: ```bash make all @@ -45,6 +55,18 @@ On future builds, use `make run` to use the cached image. +:::tip Alias + +We install a wrapper script with `make all`, named for your chosen namespace. For example, simply enter the given namespace to start your Geodesic container once built: + +```bash +acme +``` + +See the `install` step of the `Makefile` for more details. + +::: + Build the toolbox image locally before continuing. Follow the [toolbox image setup steps in the How-to Get Started guide](/layers/project/#building-the-toolbox-image). In short, diff --git a/docs/layers/software-delivery/ecs-ecspresso/ecs-ecspresso.mdx b/docs/layers/software-delivery/ecs-ecspresso/ecs-ecspresso.mdx index e4cffc6f8..21a112b14 100644 --- a/docs/layers/software-delivery/ecs-ecspresso/ecs-ecspresso.mdx +++ b/docs/layers/software-delivery/ecs-ecspresso/ecs-ecspresso.mdx @@ -7,6 +7,7 @@ import Intro from '@site/src/components/Intro'; import KeyPoints from '@site/src/components/KeyPoints'; import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +import CollapsibleText from '@site/src/components/CollapsibleText'; We use the [`ecspresso`](https://github.com/kayac/ecspresso) deployment tool for Amazon ECS to manage ECS services using a code-driven approach, alongside reusable GitHub Action workflows. This setup allows tasks to be defined with Terraform within the infrastructure repository, and task definitions to reside alongside the application code. Ecspresso provides extensive configuration options via YAML, JSON, and Jsonnet, and includes plugins for enhanced functionality such as Terraform state lookups. @@ -57,192 +58,192 @@ sequenceDiagram ### Github Action Workflows -The basic deployment flow is for feature branches. You can use the following -sample workflow to add pull request deploys to your application repository: +The basic deployment flow is for feature branches. You can use the following sample workflow to add pull request deploys to your application repository:
-Deploy +:::tip Latest Examples - - - - -```yaml -name: Feature Branch -on: - pull_request: - branches: [ 'main' ] - types: [opened, synchronize, reopened, closed, labeled, unlabeled] - -permissions: - pull-requests: write - deployments: write - id-token: write - contents: read - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: false - -jobs: - monorepo: - uses: cloudposse/github-actions-workflows/.github/workflows/controller-monorepo.yml@main - with: - file: ./deploy/config.yaml - - ci: - uses: cloudposse/github-actions-workflows/.github/workflows/ci-dockerized-app-build.yml@main - needs: [ monorepo ] - with: - organization: "cloudposse" - repository: ${{ github.event.repository.name }} - secrets: - ecr-region: ${{ secrets.ECR_REGION }} - ecr-iam-role: ${{ secrets.ECR_IAM_ROLE }} - registry: ${{ secrets.ECR_REGISTRY }} - secret-outputs-passphrase: ${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }} - - cd: - uses: cloudposse/github-actions-workflows/.github/workflows/cd-preview-ecspresso.yml@main - needs: [ ci, monorepo ] - if: ${{ always() && needs.monorepo.outputs.apps != '[]' }} - strategy: - matrix: - app: ${{ fromJson(needs.monorepo.outputs.apps) }} - with: - image: ${{ needs.ci.outputs.image }} - tag: ${{ needs.ci.outputs.tag }} - repository: ${{ github.event.repository.name }} - app: ${{ matrix.app }} - open: ${{ github.event.pull_request.state == 'open' }} - labels: ${{ toJSON(github.event.pull_request.labels.*.name) }} - ref: ${{ github.event.pull_request.head.ref }} - exclusive: true - enable-migration: ${{ contains(fromJSON(needs.monorepo.outputs.migrations), matrix.app) }} - settings: ${{ needs.monorepo.outputs.settings }} - env-label: | - qa1: deploy/qa1 - secrets: - secret-outputs-passphrase: ${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }} -``` +Check out our [example app-on-ecs](https://github.com/cloudposse-examples/app-on-ecs) for the latest example of how to use `ecspresso` with GitHub Actions. 
- - - - -```yaml -name: 2 - Main Branch -on: - push: - branches: [ main ] - -permissions: - contents: write - id-token: write - pull-requests: read - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: false - -jobs: - monorepo: - uses: cloudposse/github-actions-workflows/.github/workflows/controller-monorepo.yml@main - with: - file: ./deploy/config.yaml - - ci: - uses: cloudposse/github-actions-workflows/.github/workflows/ci-dockerized-app-build.yml@main - needs: [ monorepo ] - with: - organization: "cloudposse" - repository: ${{ github.event.repository.name }} - secrets: - ecr-region: ${{ secrets.ECR_REGION }} - ecr-iam-role: ${{ secrets.ECR_IAM_ROLE }} - registry: ${{ secrets.ECR_REGISTRY }} - secret-outputs-passphrase: ${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }} - - cd: - uses: cloudposse/github-actions-workflows/.github/workflows/cd-ecspresso.yml@main - needs: [ ci, monorepo ] - strategy: - matrix: - app: ${{ fromJson(needs.monorepo.outputs.apps) }} - with: - image: ${{ needs.ci.outputs.image }} - tag: ${{ needs.ci.outputs.tag }} - repository: ${{ github.event.repository.name }} - app: ${{ matrix.app }} - environment: dev - enable-migration: ${{ contains(fromJSON(needs.monorepo.outputs.migrations), matrix.app) }} - settings: ${{ needs.monorepo.outputs.settings }} - secrets: - secret-outputs-passphrase: ${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }} - - release: - uses: cloudposse/github-actions-workflows/.github/workflows/controller-draft-release.yml@main - needs: [ cd ] -``` +::: - - - - -```yaml -name: 3 - Release -on: - release: - types: [published] - -permissions: - id-token: write - contents: write - -concurrency: - group: ${{ github.workflow }} - cancel-in-progress: false - -jobs: - monorepo: - uses: cloudposse/github-actions-workflows/.github/workflows/controller-monorepo.yml@main - with: - file: ./deploy/config.yaml - - ci: - uses: cloudposse/github-actions-workflows/.github/workflows/ci-dockerized-app-promote.yml@main - needs: [ monorepo ] - with: - organization: "cloudposse" - repository: ${{ github.event.repository.name }} - version: ${{ github.event.release.tag_name }} - secrets: - ecr-region: ${{ secrets.ECR_REGION }} - ecr-iam-role: ${{ secrets.ECR_IAM_ROLE }} - registry: ${{ secrets.ECR_REGISTRY }} - secret-outputs-passphrase: ${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }} - - cd: - uses: cloudposse/github-actions-workflows/.github/workflows/cd-ecspresso.yml@main - needs: [ ci, monorepo ] - strategy: - matrix: - app: ${{ fromJson(needs.monorepo.outputs.apps) }} - with: - image: ${{ needs.ci.outputs.image }} - tag: ${{ needs.ci.outputs.tag }} - repository: ${{ github.event.repository.name }} - app: ${{ matrix.app }} - environment: "staging" - enable-migration: ${{ contains(fromJSON(needs.monorepo.outputs.migrations), matrix.app) }} - settings: ${{ needs.monorepo.outputs.settings }} - secrets: - secret-outputs-passphrase: ${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }} -``` - - + + + + ```yaml title=".github/workflows/feature-branch.yaml" + name: 1 - Feature Branch + on: + pull_request: + branches: [ main ] + types: [opened, synchronize, reopened, closed, labeled, unlabeled] + + permissions: + pull-requests: write + deployments: write + id-token: write + contents: read + + concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: false + + jobs: + monorepo: + uses: cloudposse/github-actions-workflows/.github/workflows/controller-monorepo.yml@main + with: + file: ./deploy/config.yaml + + ci: + uses: 
cloudposse/github-actions-workflows/.github/workflows/ci-dockerized-app-build.yml@main + needs: [ monorepo ] + with: + organization: "cloudposse" + repository: ${{ github.event.repository.name }} + secrets: + ecr-region: ${{ secrets.ECR_REGION }} + ecr-iam-role: ${{ secrets.ECR_IAM_ROLE }} + registry: ${{ secrets.ECR_REGISTRY }} + secret-outputs-passphrase: ${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }} + + cd: + uses: cloudposse/github-actions-workflows/.github/workflows/cd-preview-ecspresso.yml@main + needs: [ ci, monorepo ] + if: ${{ always() && needs.monorepo.outputs.apps != '[]' }} + strategy: + matrix: + app: ${{ fromJson(needs.monorepo.outputs.apps) }} + with: + image: ${{ needs.ci.outputs.image }} + tag: ${{ needs.ci.outputs.tag }} + repository: ${{ github.event.repository.name }} + app: ${{ matrix.app }} + open: ${{ github.event.pull_request.state == 'open' }} + labels: ${{ toJSON(github.event.pull_request.labels.*.name) }} + ref: ${{ github.event.pull_request.head.ref }} + exclusive: true + enable-migration: ${{ contains(fromJSON(needs.monorepo.outputs.migrations), matrix.app) }} + settings: ${{ needs.monorepo.outputs.settings }} + env-label: | + qa1: deploy/qa1 + secrets: + secret-outputs-passphrase: ${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }} + ``` + + + + + + ```yaml title=".github/workflows/main-branch.yaml" + name: 2 - Main Branch + on: + push: + branches: [ main ] + + permissions: + contents: write + id-token: write + pull-requests: read + + concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: false + + jobs: + monorepo: + uses: cloudposse/github-actions-workflows/.github/workflows/controller-monorepo.yml@main + with: + file: ./deploy/config.yaml + + ci: + uses: cloudposse/github-actions-workflows/.github/workflows/ci-dockerized-app-build.yml@main + needs: [ monorepo ] + with: + organization: "cloudposse" + repository: ${{ github.event.repository.name }} + secrets: + ecr-region: ${{ secrets.ECR_REGION }} + ecr-iam-role: ${{ secrets.ECR_IAM_ROLE }} + registry: ${{ secrets.ECR_REGISTRY }} + secret-outputs-passphrase: ${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }} + + cd: + uses: cloudposse/github-actions-workflows/.github/workflows/cd-ecspresso.yml@main + needs: [ ci, monorepo ] + strategy: + matrix: + app: ${{ fromJson(needs.monorepo.outputs.apps) }} + with: + image: ${{ needs.ci.outputs.image }} + tag: ${{ needs.ci.outputs.tag }} + repository: ${{ github.event.repository.name }} + app: ${{ matrix.app }} + environment: dev + enable-migration: ${{ contains(fromJSON(needs.monorepo.outputs.migrations), matrix.app) }} + settings: ${{ needs.monorepo.outputs.settings }} + secrets: + secret-outputs-passphrase: ${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }} + + release: + uses: cloudposse/github-actions-workflows/.github/workflows/controller-draft-release.yml@main + needs: [ cd ] + ``` + + + + + + ```yaml title=".github/workflows/release.yaml" + name: 3 - Release + on: + release: + types: [published] + + permissions: + id-token: write + contents: write + + concurrency: + group: ${{ github.workflow }} + cancel-in-progress: false + + jobs: + monorepo: + uses: cloudposse/github-actions-workflows/.github/workflows/controller-monorepo.yml@main + with: + file: ./deploy/config.yaml + + ci: + uses: cloudposse/github-actions-workflows/.github/workflows/ci-dockerized-app-promote.yml@main + needs: [ monorepo ] + with: + organization: "cloudposse" + repository: ${{ github.event.repository.name }} + version: ${{ github.event.release.tag_name }} + secrets: + ecr-region: ${{ 
secrets.ECR_REGION }} + ecr-iam-role: ${{ secrets.ECR_IAM_ROLE }} + registry: ${{ secrets.ECR_REGISTRY }} + secret-outputs-passphrase: ${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }} + + cd: + uses: cloudposse/github-actions-workflows/.github/workflows/cd-ecspresso.yml@main + needs: [ ci, monorepo ] + strategy: + matrix: + app: ${{ fromJson(needs.monorepo.outputs.apps) }} + with: + image: ${{ needs.ci.outputs.image }} + tag: ${{ needs.ci.outputs.tag }} + repository: ${{ github.event.repository.name }} + app: ${{ matrix.app }} + environment: "staging" + enable-migration: ${{ contains(fromJSON(needs.monorepo.outputs.migrations), matrix.app) }} + settings: ${{ needs.monorepo.outputs.settings }} + secrets: + secret-outputs-passphrase: ${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }} + ``` + + -
## References - [Ecspresso](https://github.com/kayac/ecspresso) : Tool repo diff --git a/docs/layers/software-delivery/ecs-ecspresso/ecs-partial-task-definitions.mdx b/docs/layers/software-delivery/ecs-ecspresso/ecs-partial-task-definitions.mdx new file mode 100644 index 000000000..828790cb9 --- /dev/null +++ b/docs/layers/software-delivery/ecs-ecspresso/ecs-partial-task-definitions.mdx @@ -0,0 +1,221 @@ +--- +title: "ECS Partial Task Definitions" +sidebar_label: "Partial Task Definitions" +sidebar_position: 20 +--- + +import Intro from '@site/src/components/Intro'; +import Steps from '@site/src/components/Steps'; + + + This document describes what partial task definitions are and how we can use them to set up ECS services using Terraform and GitHub Actions. + + +## The Problem + +Managing ECS Services is challenging. Ideally, we want our services to be managed by Terraform so everything lives +in code. However, we also want to update the task definition via GitOps through the GitHub release lifecycle. This is +challenging because Terraform can create the task definition, but if updated by the application repository, the task +definition will be out of sync with the Terraform state. + +Managing it entirely through Terraform means the newly built image cannot easily be updated by the application repository +unless we directly commit to the infrastructure repository, which is not ideal. + +Managing it entirely through the application repository means we cannot codify the infrastructure and have to hardcode +ARNs, secrets, and other infrastructure-specific configurations. + +## Introduction + +ECS partial task definitions break the task definition into smaller parts. This makes the task definition easier to +manage and update. + +We do this by setting up Terraform to manage a portion of the task definition, and the application repository to manage +another portion. + +The Terraform (infrastructure) portion is created first. It will create an ECS Service, and then upload the task +definition JSON to S3 as `task-template.json`. The application repository keeps its own `task-definition.json` under +version control. During the development lifecycle, the application repository will download the task definition from S3, +merge the task definitions, and then update the ECS Service with the new task definition. Finally, GitHub Actions will +update the S3 bucket with the deployed task definition under `task-definition.json`. If Terraform is planned again, it +will use the new task definition as the base for the next deployment, thus not resetting the image or application +configuration. +
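+
+Conceptually, the application-side deployment does something like the following. This is only a sketch of the flow (the bucket and key names are illustrative); in practice the merge and mirroring are handled by the ecspresso GitHub Actions described below:
+
+```bash
+# Fetch the Terraform-managed template from the mirror bucket (illustrative paths)
+aws s3 cp s3://ecs-tasks-mirror/acme-app/task-template.json .
+
+# Deep-merge the infrastructure template with the app-managed definition;
+# jq's `*` operator merges objects recursively
+jq -s '.[0] * .[1]' task-template.json deploy/task-definition.json > merged.json
+
+# Deploy the merged definition, then mirror the result back for Terraform
+ecspresso deploy --config ecspresso.yml
+aws s3 cp merged.json s3://ecs-tasks-mirror/acme-app/task-definition.json
+```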
+ +### Pros + +The **benefit** to using this approach is that we can manage the task definition portion in Terraform with the +infrastructure, meaning secrets, volumes, and other ARNs can be managed in Terraform. If a filesystem ID updates we can +re-apply Terraform to update the task definition with the new filesystem ID. The application repository can manage the +container definitions, environment variables, and other application-specific configurations. This allows developers who +are closer to the application to quickly update the environment variables or other configuration. + +### Cons + +The drawback to this approach is that it is more complex than managing the task definition entirely in Terraform or the +application repository. It requires more setup and more moving parts. It can be confusing for a developer who is not +familiar with the setup to understand how the task definition is being managed and deployed. + +This also means that when something goes wrong, it becomes harder to troubleshoot as there are more moving parts. + +### Getting Setup + +#### Pre-requisites + +- Application Repository - [Cloud Posse Example ECS Application](https://github.com/cloudposse-examples/app-on-ecs) +- Infrastructure Repository +- ECS Cluster - [Cloud Posse Docs](https://docs.cloudposse.com/components/library/aws/ecs/) - + [Component](https://github.com/cloudposse/Terraform-aws-components/tree/main/modules/ecs). +- `ecs-service` - [Cloud Posse Docs](https://docs.cloudposse.com/components/library/aws/ecs-service/) - + [Component](https://github.com/cloudposse/Terraform-aws-components/tree/main/modules/ecs-service). + - **Must** use the Cloud Posse Component. + - [`v1.416.0`](https://github.com/cloudposse/Terraform-aws-components/releases/tag/1.416.0) or later. +- S3 Bucket - [Cloud Posse Docs](https://docs.cloudposse.com/components/library/aws/s3-bucket/) - + [Component](https://github.com/cloudposse/Terraform-aws-components/tree/main/modules/s3-bucket). + +#### Steps + + + +1. Set up the S3 Bucket that will store the task definition. + +
This bucket should be in the same account as the ECS Cluster. + +
+
S3 Bucket Default Definition + + ```yaml + components: + terraform: + s3-bucket/defaults: + metadata: + type: abstract + vars: + enabled: true + account_map_tenant_name: core + # Suggested configuration for all buckets + user_enabled: false + acl: "private" + grants: null + force_destroy: false + versioning_enabled: false + allow_encrypted_uploads_only: true + block_public_acls: true + block_public_policy: true + ignore_public_acls: true + restrict_public_buckets: true + allow_ssl_requests_only: true + lifecycle_configuration_rules: + - id: default + enabled: true + abort_incomplete_multipart_upload_days: 90 + filter_and: + prefix: "" + tags: {} + # Move to Glacier after 2 years + transition: + - storage_class: GLACIER + days: 730 + # Never expire + expiration: {} + # Versioning isn't enabled, but these default values are still required + noncurrent_version_transition: + - storage_class: GLACIER + days: 90 + noncurrent_version_expiration: {} + ```
+ + ```yaml + import: + - catalog/s3-bucket/defaults + + components: + terraform: + s3-bucket/ecs-tasks-mirror: #NOTE this is the component instance name. + metadata: + component: s3-bucket + inherits: + - s3-bucket/defaults + vars: + enabled: true + name: ecs-tasks-mirror + ``` + +2. Create an ECS Service in Terraform + +
Set up the ECS Service in Terraform using the + [`ecs-service` component](https://github.com/cloudposse/Terraform-aws-components/tree/main/modules/ecs-service). This + will create the ECS Service and upload the task definition to the S3 bucket. + +
To enable Partial Task Definitions, set the variable `s3_mirror_name` to be the component instance name of the + bucket to mirror to. For example `s3-bucket/ecs-tasks-mirror` + + ```yaml + components: + terraform: + ecs-services/defaults: + metadata: + component: ecs-service + type: abstract + vars: + enabled: true + ecs_cluster_name: "ecs/cluster" + s3_mirror_name: s3-bucket/ecs-tasks-mirror + ``` + +3. Set up an Application repository with GitHub workflows. + + An example application repository can be found [here](https://github.com/cloudposse-examples/app-on-ecs). + +
Two things need to be pulled from this repository: + + - The `task-definition.json` file under `deploy/task-definition.json` + - The GitHub Workflows. + + An important note about the GitHub Workflows: in the example repository, they all live under `.github/workflows`. This + is done so workflow development can be fast; however, we recommend moving the shared workflows to a separate + repository and calling them from the application repository. The application repository should only contain the + workflows `main-branch.yaml`, `release.yaml`, and `feature-branch.yml`. +
To enable Partial Task Definitions in the workflows, the call to + [`cloudposse/github-action-run-ecspresso` (link)](https://github.com/cloudposse-examples/app-on-ecs/blob/main/.github/workflows/workflow-cd-ecspresso.yml#L133-L147) + should have the input `mirror_to_s3_bucket` set to the S3 bucket name. The variable `use_partial_taskdefinition` + should be set to `'true'`. +
+ Example GitHub Action Step + + ```yaml + - name: Deploy + uses: cloudposse/github-action-deploy-ecspresso@0.6.0 + continue-on-error: true + if: ${{ steps.db_migrate.outcome != 'failure' }} + id: deploy + with: + image: ${{ steps.image.outputs.out }} + image-tag: ${{ inputs.tag }} + region: ${{ steps.environment.outputs.region }} + operation: deploy + debug: false + cluster: ${{ steps.environment.outputs.cluster }} + application: ${{ steps.environment.outputs.name }} + taskdef-path: ${{ inputs.path }} + mirror_to_s3_bucket: ${{ steps.environment.outputs.s3-bucket }} + use_partial_taskdefinition: "true" + timeout: 10m + ``` + +
+ +
+ ## Operation Changes through Terraform will not immediately be reflected in the ECS Service. This is because the task template has been updated, but whatever was in the `task-definition.json` file in the S3 bucket will be used for deployment. To update the ECS Service after updating the Terraform for it, you must deploy through GitHub Actions. This will then download the new template and create an updated `task-definition.json` to store in S3. diff --git a/docs/layers/software-delivery/ecs-ecspresso/setup.mdx b/docs/layers/software-delivery/ecs-ecspresso/setup.mdx index 9ee445561..147c4c414 100644 --- a/docs/layers/software-delivery/ecs-ecspresso/setup.mdx +++ b/docs/layers/software-delivery/ecs-ecspresso/setup.mdx @@ -23,7 +23,8 @@ import AtmosWorkflow from '@site/src/components/AtmosWorkflow'; | 3. Validate the environment configuration | Click Ops | | 4. Create a GitHub PAT | Click Ops | | 5. Set all Example App repository secrets | Click Ops | -| 6. Deploy the example ECS services | `atmos workflow deploy/app-on-ecs -f app-on-ecs` | +| 6. Deploy the shared ECS Task Definition S3 Bucket | `atmos terraform apply s3-bucket/ecs-tasks-mirror -s < YOUR STACK >` | +| 7. Deploy the example ECS services | `atmos workflow deploy/app-on-ecs -f app-on-ecs` | @@ -306,6 +307,12 @@ We do not recommend keeping all shared workflows in the same repository as in th
+ + ### Configure the S3 Mirror Bucket, if not already configured + + If you haven't already configured the S3 mirror bucket, deploy and configure the shared S3 bucket for ECS task definitions now. Follow the [ECS Partial Task Definitions guide](/layers/software-delivery/ecs-ecspresso/ecs-partial-task-definitions/#steps). + + ### Deploy the Example App ECS Service @@ -368,7 +375,7 @@ We do not recommend keeping all shared workflows in the same repository as in th - Apply this component with the following: + Finally, apply the `ecs-services/example-app-on-ecs` component to deploy the Example App ECS service. diff --git a/docs/layers/software-delivery/eks-argocd/eks-argocd.mdx b/docs/layers/software-delivery/eks-argocd/eks-argocd.mdx index aefc215fe..73ee7a393 100644 --- a/docs/layers/software-delivery/eks-argocd/eks-argocd.mdx +++ b/docs/layers/software-delivery/eks-argocd/eks-argocd.mdx @@ -8,6 +8,7 @@ import KeyPoints from '@site/src/components/KeyPoints'; import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import Note from '@site/src/components/Note'; +import CollapsibleText from '@site/src/components/CollapsibleText'; Argo CD is an open-source declarative, GitOps continuous delivery tool for Kubernetes applications. It enables developers to manage and deploy applications on Kubernetes clusters using Git repositories as the source of truth for configuration and definitions. Argo CD follows the GitOps methodology, which means that the entire application configuration, including manifests, parameters, and even application state, is stored in a Git repository. @@ -97,106 +98,102 @@ sequenceDiagram Application repository will create a deployment when a workflow is triggered and call the relevant shared workflow. -
-Deploy +:::tip Latest Examples - - - - -```yaml -# .github/workflows/feature-branch.yaml -name: Feature Branch -on: - pull_request: - branches: [ 'main' ] - types: [opened, synchronize, reopened, closed, labeled, unlabeled] - -permissions: - pull-requests: write - deployments: write - id-token: write - contents: read - -jobs: - do: - uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/feature-branch.yml@main - with: - organization: "${{ github.event.repository.owner.login }}" - repository: "${{ github.event.repository.name }}" - open: ${{ github.event.pull_request.state == 'open' }} - labels: ${{ toJSON(github.event.pull_request.labels.*.name) }} - ref: ${{ github.event.pull_request.head.ref }} - secrets: - github-private-actions-pat: "${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }}" - registry: "${{ secrets.ECR_REGISTRY }}" - secret-outputs-passphrase: "${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }}" - ecr-region: "${{ secrets.ECR_REGION }}" - ecr-iam-role: "${{ secrets.ECR_IAM_ROLE }}" -``` - - - - - -```yaml -# .github/workflows/main-branch.yaml -name: Main Branch -on: - push: - branches: [ main ] - -permissions: - contents: write - id-token: write - -jobs: - do: - uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/main-branch.yml@main - with: - organization: "${{ github.event.repository.owner.login }}" - repository: "${{ github.event.repository.name }}" - secrets: - github-private-actions-pat: "${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }}" - registry: "${{ secrets.ECR_REGISTRY }}" - secret-outputs-passphrase: "${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }}" - ecr-region: "${{ secrets.ECR_REGION }}" - ecr-iam-role: "${{ secrets.ECR_IAM_ROLE }}" -``` - - - - +Check out our [example app-on-eks-with-argocd](https://github.com/cloudposse-examples/app-on-eks-with-argocd) for the latest example of how to use ArgoCD with GitHub Actions. 
-```yaml -# .github/workflows/release.yaml -name: Release -on: - release: - types: [published] - -permissions: - id-token: write - contents: write - -jobs: - perform: - uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/release.yml@main - with: - organization: "${{ github.event.repository.owner.login }}" - repository: "${{ github.event.repository.name }}" - version: ${{ github.event.release.tag_name }} - secrets: - github-private-actions-pat: "${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }}" - registry: "${{ secrets.ECR_REGISTRY }}" - secret-outputs-passphrase: "${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }}" - ecr-region: "${{ secrets.ECR_REGION }}" - ecr-iam-role: "${{ secrets.ECR_IAM_ROLE }}" -``` +::: - + + + + ```yaml title=".github/workflows/feature-branch.yaml" + name: Feature Branch + on: + pull_request: + branches: [ 'main' ] + types: [opened, synchronize, reopened, closed, labeled, unlabeled] + + permissions: + pull-requests: write + deployments: write + id-token: write + contents: read + + jobs: + do: + uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/feature-branch.yml@main + with: + organization: "${{ github.event.repository.owner.login }}" + repository: "${{ github.event.repository.name }}" + open: ${{ github.event.pull_request.state == 'open' }} + labels: ${{ toJSON(github.event.pull_request.labels.*.name) }} + ref: ${{ github.event.pull_request.head.ref }} + secrets: + github-private-actions-pat: "${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }}" + registry: "${{ secrets.ECR_REGISTRY }}" + secret-outputs-passphrase: "${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }}" + ecr-region: "${{ secrets.ECR_REGION }}" + ecr-iam-role: "${{ secrets.ECR_IAM_ROLE }}" + ``` + + + + + ```yaml title=".github/workflows/main-branch.yaml" + name: Main Branch + on: + push: + branches: [ main ] + + permissions: + contents: write + id-token: write + + jobs: + do: + uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/main-branch.yml@main + with: + organization: "${{ github.event.repository.owner.login }}" + repository: "${{ github.event.repository.name }}" + secrets: + github-private-actions-pat: "${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }}" + registry: "${{ secrets.ECR_REGISTRY }}" + secret-outputs-passphrase: "${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }}" + ecr-region: "${{ secrets.ECR_REGION }}" + ecr-iam-role: "${{ secrets.ECR_IAM_ROLE }}" + ``` + + + + + ```yaml title=".github/workflows/release.yaml" + name: Release + on: + release: + types: [published] + + permissions: + id-token: write + contents: write + + jobs: + perform: + uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/release.yml@main + with: + organization: "${{ github.event.repository.owner.login }}" + repository: "${{ github.event.repository.name }}" + version: ${{ github.event.release.tag_name }} + secrets: + github-private-actions-pat: "${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }}" + registry: "${{ secrets.ECR_REGISTRY }}" + secret-outputs-passphrase: "${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }}" + ecr-region: "${{ secrets.ECR_REGION }}" + ecr-iam-role: "${{ secrets.ECR_IAM_ROLE }}" + ``` + + -
That workflow calls a Reusable Workflow, `cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd`, that is designed specifically to deploy a Dockerized application from ECR to EKS using ArgoCD. @@ -207,114 +204,107 @@ Hotfix workflows are designed to push changes directly to a released version in In order to enable hotfix workflows, create two additional workflows and modify the existing release workflow. See each of the following workflows: -
-Hotfix Workflows - - - -Before running any hotfix workflows, we must first create release branches with any release. Modify the existing release workflow to include the `hotfix` job below. - -```yaml -# .github/workflows/release.yaml -name: Release -on: - release: - types: [published] - -permissions: - id-token: write - contents: write - -jobs: - perform: - ... - - hotfix: - name: release / branch - uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/hotfix-mixin.yml@main - with: - version: ${{ github.event.release.tag_name }} -``` - - - - -This `hotfix-branch.yaml` workflow will deploy a duplicate app in the _production_ cluster to a new namespace. We need to deploy to production to validate a hotfix directly for production. - -Deploy this workflow by creating a Pull Request into the a release branch and adding the `deploy` label. - -```yaml -# .github/workflows/hotfix-branch.yaml -name: Hotfix Branch -on: - pull_request: - branches: [ 'release/**' ] - types: [opened, synchronize, reopened, closed, labeled, unlabeled] - -permissions: - pull-requests: write - deployments: write - id-token: write - contents: read - -jobs: - do: - uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/hotfix-branch.yml@main - with: - organization: "${{ github.event.repository.owner.login }}" - repository: "${{ github.event.repository.name }}" - open: ${{ github.event.pull_request.state == 'open' }} - labels: ${{ toJSON(github.event.pull_request.labels.*.name) }} - ref: ${{ github.event.pull_request.head.ref }} - path: deploy - secrets: - github-private-actions-pat: "${{ secrets.PRIVATE_REPO_ACCESS_TOKEN }}" - registry: "${{ secrets.ECR_REGISTRY }}" - secret-outputs-passphrase: "${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }}" - ecr-region: "${{ secrets.ECR_REGION }}" - ecr-iam-role: "${{ secrets.ECR_IAM_ROLE }}" -``` - - - - -Once we've validated a Pull Request for a given hotfix, we can merge that change into the release branch. When changes are pushed to a release branch, the "Hotfix Release" workflow is triggered. _This workflow will deploy the given change directly to production_. - -Before deploying, the workflow will create a minor version release and test it. -After the deployment, it will create a reintegration pull request to bring the hotfix back into the main branch and lower environments. - -In order to enable the "Hotfix Release" workflow, add the following: - -```yaml -# .github/workflows/hotfix-release.yaml -name: Hotfix Release -on: - push: - branches: [ 'release/**' ] - -permissions: - contents: write - id-token: write - -jobs: - do: - uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/hotfix-release.yml@main - with: - organization: "${{ github.event.repository.owner.login }}" - repository: "${{ github.event.repository.name }}" - path: deploy - secrets: - github-private-actions-pat: "${{ secrets.PRIVATE_REPO_ACCESS_TOKEN }}" - registry: "${{ secrets.ECR_REGISTRY }}" - secret-outputs-passphrase: "${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }}" - ecr-region: "${{ secrets.ECR_REGION }}" - ecr-iam-role: "${{ secrets.ECR_IAM_ROLE }}" -``` - - + + Before running any hotfix workflows, we must first create release branches with any release. Modify the existing release workflow to include the `hotfix` job below. + + ```yaml title=".github/workflows/release.yaml" + name: Release + on: + release: + types: [published] + + permissions: + id-token: write + contents: write + + jobs: + perform: + ... 
+ + hotfix: + name: release / branch + uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/hotfix-mixin.yml@main + with: + version: ${{ github.event.release.tag_name }} + ``` + + + + This `hotfix-branch.yaml` workflow will deploy a duplicate app in the _production_ cluster to a new namespace. We deploy to the production cluster so that the hotfix can be validated directly against production. + + Deploy this workflow by creating a Pull Request into a release branch and adding the `deploy` label. + + + ```yaml title=".github/workflows/hotfix-branch.yaml" + name: Hotfix Branch + on: + pull_request: + branches: [ 'release/**' ] + types: [opened, synchronize, reopened, closed, labeled, unlabeled] + + permissions: + pull-requests: write + deployments: write + id-token: write + contents: read + + jobs: + do: + uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/hotfix-branch.yml@main + with: + organization: "${{ github.event.repository.owner.login }}" + repository: "${{ github.event.repository.name }}" + open: ${{ github.event.pull_request.state == 'open' }} + labels: ${{ toJSON(github.event.pull_request.labels.*.name) }} + ref: ${{ github.event.pull_request.head.ref }} + path: deploy + secrets: + github-private-actions-pat: "${{ secrets.PRIVATE_REPO_ACCESS_TOKEN }}" + registry: "${{ secrets.ECR_REGISTRY }}" + secret-outputs-passphrase: "${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }}" + ecr-region: "${{ secrets.ECR_REGION }}" + ecr-iam-role: "${{ secrets.ECR_IAM_ROLE }}" + ``` + + + + Once we've validated a Pull Request for a given hotfix, we can merge that change into the release branch. When changes are pushed to a release branch, the "Hotfix Release" workflow is triggered. _This workflow will deploy the given change directly to production_. + + Before deploying, the workflow will create a minor version release and test it. + After the deployment, it will create a reintegration pull request to bring the hotfix back into the main branch and lower environments. + + In order to enable the "Hotfix Release" workflow, add the following: + + + ```yaml title=".github/workflows/hotfix-release.yaml" + name: Hotfix Release + on: + push: + branches: [ 'release/**' ] + + permissions: + contents: write + id-token: write + + jobs: + do: + uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/hotfix-release.yml@main + with: + organization: "${{ github.event.repository.owner.login }}" + repository: "${{ github.event.repository.name }}" + path: deploy + secrets: + github-private-actions-pat: "${{ secrets.PRIVATE_REPO_ACCESS_TOKEN }}" + registry: "${{ secrets.ECR_REGISTRY }}" + secret-outputs-passphrase: "${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }}" + ecr-region: "${{ secrets.ECR_REGION }}" + ecr-iam-role: "${{ secrets.ECR_IAM_ROLE }}" + ``` + + + -
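Since the snippet above elides the `perform` job, here is a sketch of how the complete release workflow might look after the modification, assembled from the release workflow shown earlier plus the `hotfix` job (adjust secrets to match your setup):

```yaml title=".github/workflows/release.yaml"
name: Release
on:
  release:
    types: [published]

permissions:
  id-token: write
  contents: write

jobs:
  perform:
    uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/release.yml@main
    with:
      organization: "${{ github.event.repository.owner.login }}"
      repository: "${{ github.event.repository.name }}"
      version: ${{ github.event.release.tag_name }}
    secrets:
      github-private-actions-pat: "${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }}"
      registry: "${{ secrets.ECR_REGISTRY }}"
      secret-outputs-passphrase: "${{ secrets.GHA_SECRET_OUTPUT_PASSPHRASE }}"
      ecr-region: "${{ secrets.ECR_REGION }}"
      ecr-iam-role: "${{ secrets.ECR_IAM_ROLE }}"

  # Cuts the release branch for this release so the hotfix workflows can target it
  hotfix:
    name: release / branch
    uses: cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd/.github/workflows/hotfix-mixin.yml@main
    with:
      version: ${{ github.event.release.tag_name }}
```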
These workflows also call the same Reusable Workflow repository, `cloudposse/github-actions-workflows-docker-ecr-eks-helm-argocd`, as well as several of the same Reusable Workflows called from that repository. For example, `cloudposse/github-actions-workflows` and `cloudposse/actions-private`. diff --git a/docs/layers/software-delivery/eks-argocd/tutorials/github-commit-notifications.mdx b/docs/layers/software-delivery/eks-argocd/tutorials/github-commit-notifications.mdx index ce2a73630..1574c134a 100644 --- a/docs/layers/software-delivery/eks-argocd/tutorials/github-commit-notifications.mdx +++ b/docs/layers/software-delivery/eks-argocd/tutorials/github-commit-notifications.mdx @@ -80,7 +80,7 @@ Our implementation of Argo CD breaks up notifications into "notifiers", "templat "body": "{\"context\":\"continuous-delivery/{{.app.metadata.name}}\",\"description\":\"ArgoCD\",\"state\":\"pending\",\"target_url\":\"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}\"}" "method": "POST" "path": "/repos/{{call .repo.FullNameByRepoURL .app.spec.source.repoURL}}/statuses/{{.app.status.operationState.operation.sync.revision}}" - template.app-deploy-succeded: | + template.app-deploy-succeeded: | "alertmanager": null "message": "Application {{ .app.metadata.name }} is now running new version of deployments manifests." @@ -103,7 +103,7 @@ Our implementation of Argo CD breaks up notifications into "notifiers", "templat eks/argocd: vars: notifications_templates: - app-deploy-succeded: + app-deploy-succeeded: message: "Application {{ .app.metadata.name }} is now running new version of deployments" webhook: foo-repo-github-commit: @@ -146,15 +146,15 @@ Our implementation of Argo CD breaks up notifications into "notifiers", "templat - "app-deploy-started" "when": "app.status.operationState.phase in ['Running'] or ( app.status.operationState.phase == 'Succeeded' and app.status.health.status == 'Progressing' )" - trigger.on-deploy-succeded: | + trigger.on-deploy-succeeded: | - "oncePer": "app.status.sync.revision" "send": - - "app-deploy-succeded" + - "app-deploy-succeeded" "when": "app.status.operationState.phase == 'Succeeded' and app.status.health.status == 'Healthy'" ``` - These triggers may trigger _multiple templates_. For example `trigger.on-deploy-succeded` triggers both `template.app-deploy-succeded.webhook.app-repo-github-commit-status` and `template.app-deploy-succeded.webhook.argocd-repo-github-commit-status`. + These triggers may trigger _multiple templates_. For example `trigger.on-deploy-succeeded` triggers both `template.app-deploy-succeeded.webhook.app-repo-github-commit-status` and `template.app-deploy-succeeded.webhook.argocd-repo-github-commit-status`. diff --git a/docs/layers/software-delivery/lambda/lambda.mdx b/docs/layers/software-delivery/lambda/lambda.mdx index 72ee82cdd..3266301a7 100644 --- a/docs/layers/software-delivery/lambda/lambda.mdx +++ b/docs/layers/software-delivery/lambda/lambda.mdx @@ -8,6 +8,7 @@ import Intro from '@site/src/components/Intro'; import KeyPoints from '@site/src/components/KeyPoints'; import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +import CollapsibleText from '@site/src/components/CollapsibleText'; Deploy Lambda functions using GitHub Workflows with a code-driven approach. The build process updates S3 with assets and SSM with the new version, requiring a Terraform run for promotion. GitHub Workflows manage the entire lifecycle, from building and packaging Lambda functions to deploying them with reusable workflows.
@@ -58,184 +59,175 @@ flowchart LR Application repository updates S3 with build assets, then updates SSM with the new version. Each SSM update is effectively a promotion and requires a Terraform run to realize the change. -
-Build and Dev Promote - - - - ```yaml - # .github/workflows/reusable-publish-lambda-zip.yaml - name: Publish Lambda Function - on: - workflow_call: - inputs: - function-name: - required: true - type: string - source-folder: - required: true - type: string - artifacts-bucket-and-prefix: - required: true - type: string - aws-region: - required: true - type: string - secrets: - cicd-role-arn: - required: true - - permissions: - id-token: write - contents: read - - jobs: - publish: - runs-on: self-hosted - steps: - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ inputs.cicd-role-arn }} - aws-region: ${{ inputs.aws-region }} - - name: Checkout - uses: actions/checkout@v4 - - name: Package Lambda - run: | - cd ${{ inputs.source-folder }} && zip ${{ github.sha }}.zip * - - name: Push Lambda - run: | - aws s3 cp ${{ inputs.source-folder }}/${{ github.sha }}.zip s3://${{ inputs.artifacts-bucket-and-prefix }}/${{ inputs.function-name }}/ --sse - - name: Write tag to SSM - run: | - aws ssm put-parameter --name /lambda/${{ inputs.function-name}}/tag --type String --value ${{ github.sha }} --overwrite - ``` - + + ```yaml title=".github/workflows/reusable-publish-lambda-zip.yaml" + name: Publish Lambda Function + on: + workflow_call: + inputs: + function-name: + required: true + type: string + source-folder: + required: true + type: string + artifacts-bucket-and-prefix: + required: true + type: string + aws-region: + required: true + type: string + secrets: + cicd-role-arn: + required: true + + permissions: + id-token: write + contents: read + + jobs: + publish: + runs-on: self-hosted + steps: + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ inputs.cicd-role-arn }} + aws-region: ${{ inputs.aws-region }} + - name: Checkout + uses: actions/checkout@v4 + - name: Package Lambda + run: | + cd ${{ inputs.source-folder }} && zip ${{ github.sha }}.zip * + - name: Push Lambda + run: | + aws s3 cp ${{ inputs.source-folder }}/${{ github.sha }}.zip s3://${{ inputs.artifacts-bucket-and-prefix }}/${{ inputs.function-name }}/ --sse + - name: Write tag to SSM + run: | + aws ssm put-parameter --name /lambda/${{ inputs.function-name}}/tag --type String --value ${{ github.sha }} --overwrite + ``` + - - - ```yaml - # .github/workflows/reusable-promote-lambda-zip.yaml - name: Publish Lambda Function - on: - workflow_call: - inputs: - function-name: - required: true - type: string - artifacts-bucket-and-prefix: - required: true - type: string - aws-region: - required: true - type: string - secrets: - cicd-role-arn: - required: true - staging-role-arn: - required: true - prod-role-arn: - required: true - - permissions: - id-token: write - contents: read - - jobs: - publish: - runs-on: self-hosted - steps: - - name: Configure AWS credentials for 'cicd' role - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ inputs.cicd-role-arn }} - aws-region: ${{ inputs.aws-region }} - - name: Configure AWS credentials for source stage - uses: aws-actions/configure-aws-credentials@v4 - with: - aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }} - aws-session-token: ${{ env.AWS_SESSION_TOKEN }} - role-duration-seconds: 3000 - role-skip-session-tagging: true - role-to-assume: ${{ inputs.staging-role-arn }} - aws-region: ${{ inputs.aws-region }} - - name: Checkout - uses: actions/checkout@v4 - - name: Get tag from SSM - id: 
get-tag-from-ssm - run: | - TAG=`aws ssm get-parameter --name /lambda/${{ inputs.function-name }}/tag | jq -r .Parameter.Value` - echo "tag=$TAG" >> $GITHUB_OUTPUT - - name: Copy Lambda to local - run: | - aws s3 cp s3://${{ inputs.artifacts-bucket-and-prefix }}/${{ inputs.function-name }}/${{ steps.get-tag-from-ssm.outputs.tag }}.zip . - - name: Configure AWS credentials for 'cicd' role - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ inputs.cicd-role-arn }} - aws-region: ${{ inputs.aws-region }} - - name: Configure AWS credentials for destination stage - uses: aws-actions/configure-aws-credentials@v4 - with: - aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }} - aws-session-token: ${{ env.AWS_SESSION_TOKEN }} - role-duration-seconds: 3000 - role-skip-session-tagging: true - role-to-assume: ${{ inputs.prod-role-arn }} - aws-region: ${{ inputs.aws-region }} - - name: Copy Lambda to destination bucket - run: | - aws s3 cp ${{ steps.get-tag-from-ssm.outputs.tag }}.zip \ - s3://${{ inputs.artifacts-bucket-and-prefix }}/${{ inputs.function-name }}/ --sse - - name: Write tag to SSM - run: | - aws ssm put-parameter --name /lambda/${{ inputs.function-name}}/tag --type String --value ${{ steps.get-tag-from-ssm.outputs.tag }} --overwrite - ``` - + + ```yaml title=".github/workflows/reusable-promote-lambda-zip.yaml" + name: Publish Lambda Function + on: + workflow_call: + inputs: + function-name: + required: true + type: string + artifacts-bucket-and-prefix: + required: true + type: string + aws-region: + required: true + type: string + secrets: + cicd-role-arn: + required: true + staging-role-arn: + required: true + prod-role-arn: + required: true + + permissions: + id-token: write + contents: read + + jobs: + publish: + runs-on: self-hosted + steps: + - name: Configure AWS credentials for 'cicd' role + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ inputs.cicd-role-arn }} + aws-region: ${{ inputs.aws-region }} + - name: Configure AWS credentials for source stage + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ env.AWS_SESSION_TOKEN }} + role-duration-seconds: 3000 + role-skip-session-tagging: true + role-to-assume: ${{ inputs.staging-role-arn }} + aws-region: ${{ inputs.aws-region }} + - name: Checkout + uses: actions/checkout@v4 + - name: Get tag from SSM + id: get-tag-from-ssm + run: | + TAG=`aws ssm get-parameter --name /lambda/${{ inputs.function-name }}/tag | jq -r .Parameter.Value` + echo "tag=$TAG" >> $GITHUB_OUTPUT + - name: Copy Lambda to local + run: | + aws s3 cp s3://${{ inputs.artifacts-bucket-and-prefix }}/${{ inputs.function-name }}/${{ steps.get-tag-from-ssm.outputs.tag }}.zip . 
+ - name: Configure AWS credentials for 'cicd' role + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ inputs.cicd-role-arn }} + aws-region: ${{ inputs.aws-region }} + - name: Configure AWS credentials for destination stage + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }} + aws-session-token: ${{ env.AWS_SESSION_TOKEN }} + role-duration-seconds: 3000 + role-skip-session-tagging: true + role-to-assume: ${{ inputs.prod-role-arn }} + aws-region: ${{ inputs.aws-region }} + - name: Copy Lambda to destination bucket + run: | + aws s3 cp ${{ steps.get-tag-from-ssm.outputs.tag }}.zip \ + s3://${{ inputs.artifacts-bucket-and-prefix }}/${{ inputs.function-name }}/ --sse + - name: Write tag to SSM + run: | + aws ssm put-parameter --name /lambda/${{ inputs.function-name}}/tag --type String --value ${{ steps.get-tag-from-ssm.outputs.tag }} --overwrite + ``` + - - ```yaml - # .github/workflows/reusable-promote-lambda-zip.yaml - name: Deploy Lambda via Spacelift - on: - workflow_call: - inputs: - function-name: - required: true - type: string - stack: - required: true - type: string - secrets: - spacelift-api-key-id: - required: true - spacelift-api-key-secret: - required: true - - jobs: - deploy: - runs-on: self-hosted - container: 123456789012.dkr.ecr.us-east-2.amazonaws.com/acme/infra:latest - steps: - - name: Trigger Spacelift Stack Execution - env: - SPACELIFT_API_ENDPOINT: https://acme.app.spacelift.io - SPACELIFT_API_KEY_ID: ${{ secrets.spacelift-api-key-id }} - SPACELIFT_API_KEY_SECRET: ${{ secrets.spacelift-api-key-secret }} - run: | - spacectl stack deploy --id ${{ inputs.stack }}-lambda-${{ inputs.function-name}} --tail - ``` - + + ```yaml title=".github/workflows/reusable-deploy-lambda.yaml" + name: Deploy Lambda via Spacelift + on: + workflow_call: + inputs: + function-name: + required: true + type: string + stack: + required: true + type: string + secrets: + spacelift-api-key-id: + required: true + spacelift-api-key-secret: + required: true + + jobs: + deploy: + runs-on: self-hosted + container: 123456789012.dkr.ecr.us-east-2.amazonaws.com/acme/infra:latest + steps: + - name: Trigger Spacelift Stack Execution + env: + SPACELIFT_API_ENDPOINT: https://acme.app.spacelift.io + SPACELIFT_API_KEY_ID: ${{ secrets.spacelift-api-key-id }} + SPACELIFT_API_KEY_SECRET: ${{ secrets.spacelift-api-key-secret }} + run: | + spacectl stack deploy --id ${{ inputs.stack }}-lambda-${{ inputs.function-name}} --tail + ``` + -
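These are `workflow_call` (reusable) workflows, so each application repository still needs a thin caller workflow to invoke them. A minimal sketch of such a caller for the publish workflow above, assuming the reusable workflow lives in the same repository; the function name, source folder, bucket prefix, and secret name are placeholders for your own values:

```yaml title=".github/workflows/build.yaml"
name: Build
on:
  push:
    branches: [ main ]

# The caller must grant at least the permissions the reusable workflow needs (OIDC)
permissions:
  id-token: write
  contents: read

jobs:
  publish:
    uses: ./.github/workflows/reusable-publish-lambda-zip.yaml
    with:
      function-name: my-function                                # placeholder
      source-folder: src/my-function                            # placeholder
      artifacts-bucket-and-prefix: acme-lambda-artifacts/lambda # placeholder
      aws-region: us-east-1
    secrets:
      cicd-role-arn: ${{ secrets.CICD_ROLE_ARN }}               # placeholder secret name
```

The promote and deploy workflows are wired up the same way, passing their additional role ARNs or the Spacelift stack name as inputs and secrets.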
### Implementation diff --git a/docs/layers/spacelift/design-decisions/design-decisions.mdx b/docs/layers/spacelift/design-decisions/design-decisions.mdx index 695dc351f..84b3936d3 100644 --- a/docs/layers/spacelift/design-decisions/design-decisions.mdx +++ b/docs/layers/spacelift/design-decisions/design-decisions.mdx @@ -7,7 +7,7 @@ import DocCardList from '@theme/DocCardList'; import Intro from '@site/src/components/Intro'; -Review the key design decisions for how you'll leverage Spacelift for continous delivery of your infrastructure with Terraform and Atmos. +Review the key design decisions for how you'll leverage Spacelift for continuous delivery of your infrastructure with Terraform and Atmos. diff --git a/docs/learn/onboarding.mdx b/docs/learn/onboarding.mdx index efd316ea2..8ca160ee8 100644 --- a/docs/learn/onboarding.mdx +++ b/docs/learn/onboarding.mdx @@ -9,6 +9,7 @@ import Steps from '@site/src/components/Steps'; import Step from '@site/src/components/Step'; import StepNumber from '@site/src/components/StepNumber'; import TaskList from '@site/src/components/TaskList'; +import ReactPlayer from 'react-player'; This guide is intended to help new developers get up to speed with the Cloud Posse reference architecture. It covers the basics of the architecture, how to get started, and how to contribute. We assume you have a basic understanding of Terraform and AWS, and that you are joining a team already using the Cloud Posse reference architecture, fully implemented in your AWS organization. @@ -44,11 +45,16 @@ If you're new to Terraform, we recommend starting with the [Terraform documentat ### Learn the Cloud Posse Toolchain +
+ +
AI generated voice
+
+ - - [ ] Read through the [Introduction to Toolset](#introduction-to-toolset) slides and become familiar with the concepts - of [Components](/components), [Stacks](/resources/legacy/fundamentals/stacks), and + - [ ] Review the Introduction to Toolset video and become familiar with the concepts + of [Components](/components), [Stacks](https://atmos.tools/core-concepts/stacks/), and [Stack Catalogs](https://atmos.tools/core-concepts/stacks/catalogs) - - [ ] Review [Geodesic](/resources/legacy/fundamentals/geodesic) docker toolbox and + - [ ] Review [Geodesic](https://github.com/cloudposse/geodesic/) docker toolbox and [How to Customize the Geodesic Shell](/learn/maintenance/tutorials/how-to-customize-the-geodesic-shell) to your liking. - [ ] Review [atmos.tools](https://atmos.tools/) @@ -87,7 +93,7 @@ If you're new to Terraform, we recommend starting with the [Terraform documentat ### How we Use Terraform (for developers) - - [ ] Review [Terraform](/resources/legacy/fundamentals/terraform) conventions used by Cloud Posse + - [ ] Review [Terraform](/best-practices/terraform/) conventions used by Cloud Posse - [ ] We store all the terraform states in S3. Make sure you understand the [Structure of Terraform S3 State Backend Bucket](/layers/accounts/tutorials/terraform-s3-state) - [ ] Learn [How to Use Terraform Remote State](/learn/maintenance/tutorials/how-to-use-terraform-remote-state) - diff --git a/docusaurus.config.js b/docusaurus.config.js index c067e7233..969211a5a 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -4,6 +4,10 @@ const darkCodeTheme = require('prism-react-renderer').themes.nightOwl; const fs = require('fs'); const path = require('path'); +// Redirects handling: +const { getStaticRedirects } = require('./plugins/staticRedirects'); +const { redirectsPlugin } = require('./plugins/dynamicRedirects'); + // Define the directory containing your CSS files const cssDirectory = path.resolve(__dirname, './src/css'); @@ -32,296 +36,295 @@ function metadataPlugin(context, options) { } } -/** @type {import('@docusaurus/types').Config} */ -const config = { - title: 'The Cloud Posse Reference Architecture', - tagline: 'The turnkey architecture for AWS, Datadog & GitHub Actions to get up and running quickly using the Atmos open source framework.', - url: 'https://docs.cloudposse.com', - baseUrl: '/', - trailingSlash: true, - onBrokenLinks: 'throw', - onBrokenMarkdownLinks: 'warn', - onDuplicateRoutes: 'warn', - favicon: 'img/favicon.png', +async function createConfig() { + /** @type {import('@docusaurus/types').Config} */ + const config = { + title: 'The Cloud Posse Reference Architecture', + tagline: 'The turnkey architecture for AWS, Datadog & GitHub Actions to get up and running quickly using the Atmos open source framework.', + url: 'https://docs.cloudposse.com', + baseUrl: '/', + trailingSlash: true, + onBrokenLinks: 'throw', + onBrokenMarkdownLinks: 'warn', + onDuplicateRoutes: 'warn', + favicon: 'img/favicon.png', - organizationName: 'cloudposse', - projectName: 'docs', - deploymentBranch: 'master', - i18n: { - defaultLocale: 'en', - locales: ['en'], - }, + organizationName: 'cloudposse', + projectName: 'docs', + deploymentBranch: 'master', + i18n: { + defaultLocale: 'en', + locales: ['en'], + }, - plugins: [ - [ - '@docusaurus/plugin-google-tag-manager', - { - containerId: process.env.GOOGLE_TAG_MANAGER || 'GTM-ABCD123' - }, - ], - [ - 'docusaurus-plugin-image-zoom', {}, - ], - [ - '@docusaurus/plugin-client-redirects', { - redirects: [ - { - from: 
'/reference-architecture', - to: '/learn' - } - ], - }, - ], - [ - '@docusaurus/plugin-ideal-image', - { - quality: 90, - max: 1030, // max resized image's size. - min: 640, // min resized image's size. if original is lower, use that size. - steps: 2, // the max number of images generated between min and max (inclusive) - disableInDev: false, - } - ], - [ - 'custom-loaders', {} + plugins: [ + [ + '@docusaurus/plugin-google-tag-manager', + { + containerId: process.env.GOOGLE_TAG_MANAGER || 'GTM-ABCD123' + }, + ], + [ + 'docusaurus-plugin-image-zoom', {}, + ], + [ + '@docusaurus/plugin-ideal-image', + { + quality: 90, + max: 1030, // max resized image's size. + min: 640, // min resized image's size. if original is lower, use that size. + steps: 2, // the max number of images generated between min and max (inclusive) + disableInDev: false, + } + ], + path.resolve(__dirname, 'plugins/custom-loaders'), + metadataPlugin, + [ + "posthog-docusaurus", + { + apiKey: "phc_G3idXOACKt4vIzgRu2FVP8ORO1D2VlkeEwX9mE2jDvT", + appUrl: "https://us.i.posthog.com", + enableInDevelopment: false, // optional + }, + ], + [ + 'docusaurus-plugin-sentry', + { + DSN: 'b022344b0e7cc96f803033fff3b377ee@o56155.ingest.us.sentry.io/4507472203087872', + }, + ], + [ + '@docusaurus/plugin-client-redirects', + { + id: 'static-redirects', + redirects: getStaticRedirects(), + }, + ], + redirectsPlugin, ], - metadataPlugin, - [ - "posthog-docusaurus", - { - apiKey: "phc_G3idXOACKt4vIzgRu2FVP8ORO1D2VlkeEwX9mE2jDvT", - appUrl: "https://us.i.posthog.com", - enableInDevelopment: false, // optional - }, + + presets: [ + [ + 'classic', + /** @type {import('@docusaurus/preset-classic').Options} */ + ({ + docs: { + routeBasePath: '/', + sidebarPath: require.resolve('./sidebars.js'), + editUrl: ({versionDocsDirPath, docPath, locale}) => { + return `https://github.com/cloudposse/docs/edit/master/content/docs/${docPath}`; + }, + exclude: ['README.md'], + showLastUpdateTime: true, + showLastUpdateAuthor: true, + onInlineTags: 'warn', + tags: 'tags.yml' + }, + theme: { + customCss: customCssFiles, + }, + }), + ], ], - [ - 'docusaurus-plugin-sentry', + + scripts: [ { - DSN: 'b022344b0e7cc96f803033fff3b377ee@o56155.ingest.us.sentry.io/4507472203087872', + src: "https://kit.fontawesome.com/3a9f2eb5b9.js", }, - ] - ], - - presets: [ - [ - 'classic', - /** @type {import('@docusaurus/preset-classic').Options} */ - ({ - docs: { - routeBasePath: '/', - sidebarPath: require.resolve('./sidebars.js'), - editUrl: ({versionDocsDirPath, docPath, locale}) => { - return `https://github.com/cloudposse/docs/edit/master/content/docs/${docPath}`; - }, - exclude: ['README.md'], - showLastUpdateTime: true, - showLastUpdateAuthor: true, - onInlineTags: 'warn', - tags: 'tags.yml' - }, - theme: { - customCss: customCssFiles, - }, - }), ], - ], - scripts: [ - { - src: "https://kit.fontawesome.com/3a9f2eb5b9.js", + markdown: { + mermaid: true, }, - ], - - markdown: { - mermaid: true, - }, - themes: ['@docusaurus/theme-mermaid'], + themes: ['@docusaurus/theme-mermaid'], - themeConfig: - /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ - ({ - metadata: [{ name: 'google-site-verification', content: process.env.GOOGLE_SITE_VERIFICATION_ID || 'preview-local' }], - docs: { - sidebar: { - hideable: true, - autoCollapseCategories: true - }, - }, - navbar: { - title: '', - logo: { - alt: 'Cloud Posse Developer Hub', - src: 'img/logo.svg', - srcDark: 'img/logo-light.svg', - }, - items: [ - { - to: '/learn', - position: 'left', - label: 'Learn', - }, - { - to: 
'/reference', - position: 'left', - label: 'Reference', - }, - { - to: '/community', - label: 'Community', - position: 'left', - }, - { - type: 'search', - position: 'right', - }, - { - href: 'https://github.com/cloudposse/', - className: 'header-github-link', - position: 'right', + themeConfig: + /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ + ({ + metadata: [{ name: 'google-site-verification', content: process.env.GOOGLE_SITE_VERIFICATION_ID || 'preview-local' }], + docs: { + sidebar: { + hideable: true, + autoCollapseCategories: true }, - { - to: 'https://cloudposse.com/', - label: 'Get a Jumpstart', - position: 'right', - className: 'button button--primary navbar-cta-button' + }, + navbar: { + title: '', + logo: { + alt: 'Cloud Posse Developer Hub', + src: 'img/logo.svg', + srcDark: 'img/logo-light.svg', }, - ], - }, - - announcementBar: { - id: 'new_docs', - content: - 'We are in the process of updating our documentation. Please let us know what you think!', - backgroundColor: 'var(--announcement-bar-background)', - textColor: 'var(--announcement-bar-text-color)', - isCloseable: true, - }, - - colorMode: { - // "light" | "dark" - defaultMode: 'dark', - - // Hides the switch in the navbar - // Useful if you want to force a specific mode - disableSwitch: false, - - // Should respect the user's color scheme preference - // "light" | "dark" | "system" - respectPrefersColorScheme: false, - }, - - algolia: { - appId: process.env.ALGOLIA_APP_ID || '32YOERUX83', - apiKey: process.env.ALGOLIA_SEARCH_API_KEY || '557985309adf0e4df9dcf3cb29c61928', // this is SEARCH ONLY API key and is not sensitive information - indexName: process.env.ALGOLIA_INDEX_NAME || 'docs.cloudposse.com', - externalUrlRegex: 'atmos\\.tools', - contextualSearch: false - }, - footer: { - style: 'dark', - links: [{ - title: 'Docs', items: [ { + to: '/learn', + position: 'left', label: 'Learn', - to: '/learn/', }, { + to: '/reference', + position: 'left', label: 'Reference', - to: '/reference/', - } - ], - }, { - title: 'Community', - items: [ - { - label: 'GitHub Discussions', - href: 'https://github.com/orgs/cloudposse/discussions', - }, - { - label: 'Slack Community', - to: '/community/slack', }, { - label: 'Slack Archives', - href: 'https://archive.sweetops.com/refarch/', + to: '/community', + label: 'Community', + position: 'left', }, { - label: 'Office Hours', - to: '/community/office-hours/', + type: 'search', + position: 'right', }, - ], - }, { - title: 'Contact Us', - items: [ { - label: 'Support', - to: '/support', - }, - { - label: 'Our GitHub', href: 'https://github.com/cloudposse/', + className: 'header-github-link', + position: 'right', }, { - label: 'Contact Us', - to: '/community/contact-us/', - }], - }], - logo: { - alt: 'Cloud Posse', - src: '/img/logo-light.svg', - href: 'https://cloudposse.com/' + to: 'https://cloudposse.com/', + label: 'Get a Jumpstart', + position: 'right', + className: 'button button--primary navbar-cta-button' + }, + ], }, - copyright: `© ${new Date().getFullYear()} Cloud Posse, LLC`, - }, - mermaid: { - theme: { - light: 'neutral', - dark: 'dark', + + announcementBar: { + id: 'new_docs', + content: + 'We are in the process of updating our documentation. 
Please let us know what you think!', + backgroundColor: 'var(--announcement-bar-background)', + textColor: 'var(--announcement-bar-text-color)', + isCloseable: true, }, - options: { - flowchart: { - useMaxWidth: true, - curve: 'linear', - padding: 15, - diagramPadding: 20, - nodeSpacing: 40, - rankSpacing: 50, - ranksep: 100, - nodesep: 100, - titleTopMargin: 25, - titlePadding: 30, - labelPadding: 30, - subGraphTitleMargin: { - top: 5, - bottom: 5 - } + + colorMode: { + // "light" | "dark" + defaultMode: 'dark', + + // Hides the switch in the navbar + // Useful if you want to force a specific mode + disableSwitch: false, + + // Should respect the user's color scheme preference + // "light" | "dark" | "system" + respectPrefersColorScheme: false, + }, + + algolia: { + appId: process.env.ALGOLIA_APP_ID || '32YOERUX83', + apiKey: process.env.ALGOLIA_SEARCH_API_KEY || '557985309adf0e4df9dcf3cb29c61928', // this is SEARCH ONLY API key and is not sensitive information + indexName: process.env.ALGOLIA_INDEX_NAME || 'docs.cloudposse.com', + externalUrlRegex: 'atmos\\.tools', + contextualSearch: false + }, + footer: { + style: 'dark', + links: [{ + title: 'Docs', + items: [ + { + label: 'Learn', + to: '/learn/', + }, + { + label: 'Reference', + to: '/reference/', + } + ], + }, { + title: 'Community', + items: [ + { + label: 'GitHub Discussions', + href: 'https://github.com/orgs/cloudposse/discussions', + }, + { + label: 'Slack Community', + to: '/community/slack', + }, + { + label: 'Slack Archives', + href: 'https://archive.sweetops.com/refarch/', + }, + { + label: 'Office Hours', + to: '/community/office-hours/', + }, + ], + }, { + title: 'Contact Us', + items: [ + { + label: 'Support', + to: '/support', + }, + { + label: 'Our GitHub', + href: 'https://github.com/cloudposse/', + }, + { + label: 'Contact Us', + to: '/community/contact-us/', + }], + }], + logo: { + alt: 'Cloud Posse', + src: '/img/logo-light.svg', + href: 'https://cloudposse.com/' + }, + copyright: `© ${new Date().getFullYear()} Cloud Posse, LLC`, + }, + mermaid: { + theme: { + light: 'neutral', + dark: 'dark', }, + options: { + flowchart: { + useMaxWidth: true, + curve: 'linear', + padding: 15, + diagramPadding: 20, + nodeSpacing: 40, + rankSpacing: 50, + ranksep: 100, + nodesep: 100, + titleTopMargin: 25, + titlePadding: 30, + labelPadding: 30, + subGraphTitleMargin: { + top: 5, + bottom: 5 + } + }, - themeVariables: { - mainBkg: '#6f72723b', - background: '#333', - clusterBkg: '#6f72723b' + themeVariables: { + mainBkg: '#6f72723b', + background: '#333', + clusterBkg: '#6f72723b' + } } - } - }, - prism: { - theme: lightCodeTheme, - darkTheme: darkCodeTheme, - additionalLanguages: ['hcl', 'bash', 'rego'], - }, - zoom: { - selector: '.markdown > img', - config: { - // options you can specify via https://github.com/francoischalifour/medium-zoom#usage - background: { - light: 'rgb(255, 255, 255)', - dark: 'rgb(50, 50, 50)' + }, + prism: { + theme: lightCodeTheme, + darkTheme: darkCodeTheme, + additionalLanguages: ['hcl', 'bash', 'rego'], + }, + zoom: { + selector: '.markdown > img', + config: { + // options you can specify via https://github.com/francoischalifour/medium-zoom#usage + background: { + light: 'rgb(255, 255, 255)', + dark: 'rgb(50, 50, 50)' + } } } - } - }), -}; + }), + }; + return config; +} -module.exports = config; +module.exports = createConfig(); diff --git a/examples/snippets/.github/workflows/atmos-terraform-apply.yaml b/examples/snippets/.github/workflows/atmos-terraform-apply.yaml index cf7625307..ecf28fa1c 100644 
--- a/examples/snippets/.github/workflows/atmos-terraform-apply.yaml +++ b/examples/snippets/.github/workflows/atmos-terraform-apply.yaml @@ -21,7 +21,7 @@ jobs: - "amd64" - "common" steps: - - uses: cloudposse-github-actions/get-pr@v1 + - uses: cloudposse-github-actions/get-pr@v2 id: pr outputs: @@ -37,7 +37,7 @@ jobs: runs-on: ["self-hosted", "terraform"] steps: - id: affected - uses: cloudposse/github-action-atmos-affected-stacks@v3 + uses: cloudposse/github-action-atmos-affected-stacks@v4 with: base-ref: ${{ github.event.pull_request.base.sha }} head-ref: ${{ github.event.pull_request.head.sha }} diff --git a/examples/snippets/.github/workflows/atmos-terraform-drift-remediation.yaml b/examples/snippets/.github/workflows/atmos-terraform-drift-remediation.yaml index 594e950fa..7f6c6b441 100644 --- a/examples/snippets/.github/workflows/atmos-terraform-drift-remediation.yaml +++ b/examples/snippets/.github/workflows/atmos-terraform-drift-remediation.yaml @@ -18,6 +18,7 @@ jobs: name: Remediate Drift runs-on: ["self-hosted", "terraform"] steps: + - uses: unfor19/install-aws-cli-action@v1 - name: Remediate Drift uses: cloudposse/github-action-atmos-terraform-drift-remediation@v2 with: @@ -43,3 +44,4 @@ jobs: action: discard atmos-version: ${{ vars.ATMOS_VERSION }} atmos-config-path: ${{ vars.ATMOS_CONFIG_PATH }} + diff --git a/examples/snippets/.github/workflows/atmos-terraform-plan.yaml b/examples/snippets/.github/workflows/atmos-terraform-plan.yaml index 56dbc5ee3..157bed37c 100644 --- a/examples/snippets/.github/workflows/atmos-terraform-plan.yaml +++ b/examples/snippets/.github/workflows/atmos-terraform-plan.yaml @@ -21,7 +21,7 @@ jobs: runs-on: ["self-hosted", "terraform"] steps: - id: affected - uses: cloudposse/github-action-atmos-affected-stacks@v3 + uses: cloudposse/github-action-atmos-affected-stacks@v4 with: atmos-version: ${{ vars.ATMOS_VERSION }} atmos-config-path: ${{ vars.ATMOS_CONFIG_PATH }} diff --git a/examples/snippets/components/docker/infra-acme/Dockerfile b/examples/snippets/components/docker/infra-acme/Dockerfile new file mode 100644 index 000000000..9f23b189c --- /dev/null +++ b/examples/snippets/components/docker/infra-acme/Dockerfile @@ -0,0 +1,66 @@ +# https://github.com/cloudposse/geodesic/ +ARG GEODESIC_VERSION=2.5.0 +ARG GEODESIC_OS=debian +# https://github.com/cloudposse/atmos +ARG ATMOS_VERSION=1.63.0 +# This should match the version set in stacks/catalog/spacelift.yaml +# This should match the version set in .github/workflows/auto-format.yaml +ARG TF_1_VERSION=1.4.5 + +FROM public.ecr.aws/cloudposse/geodesic:${GEODESIC_VERSION}-${GEODESIC_OS} + +# Geodesic message of the Day +ENV MOTD_URL="https://geodesic.sh/motd" + +# Some configuration options for Geodesic +ENV AWS_SAML2AWS_ENABLED=false +ENV AWS_VAULT_ENABLED=false +ENV AWS_VAULT_SERVER_ENABLED=false +ENV CHAMBER_KMS_KEY_ALIAS=aws/ssm +ENV GEODESIC_TF_PROMPT_ACTIVE=false +ENV DIRENV_ENABLED=false + +# Enable advanced AWS assume role chaining for tools using AWS SDK +# https://docs.aws.amazon.com/sdk-for-go/api/aws/session/ +ENV AWS_SDK_LOAD_CONFIG=1 +ENV AWS_DEFAULT_REGION=us-east-1 +ENV AWS_DEFAULT_SHORT_REGION=use1 +ENV AWS_REGION_ABBREVIATION_TYPE=short +# Install specific versions of Terraform. 
Must match versions in Spacelift terraform_version_map +# in components/terraform/spacelift/default.auto.tfvars +ARG TF_1_VERSION +RUN apt-get update && apt-get install -y -u --allow-downgrades \ + terraform-1="${TF_1_VERSION}-*" && \ + update-alternatives --set terraform /usr/share/terraform/1/bin/terraform + +ARG ATMOS_VERSION +RUN apt-get update && apt-get install -y --allow-downgrades \ + atmos="${ATMOS_VERSION}-*" \ + spacectl + +# Install pluto - a CLI tool to help discover deprecated and removed apiVersions in Kubernetes +# https://pluto.docs.fairwinds.com/ +# https://github.com/FairwindsOps/pluto +RUN apt-get update && apt-get install -y --allow-downgrades \ + pluto + +COPY rootfs/ / + + +ARG DOCKER_REPO +ARG TENANT="core" +ENV NAMESPACE=acme +# Format of Geodesic banner prompt +ENV BANNER=${NAMESPACE} +ENV DOCKER_IMAGE="${NAMESPACE}/infra" +ENV DOCKER_TAG="latest" + +# Default AWS_PROFILE +ENV AWS_PROFILE=${NAMESPACE}-identity +# This sets the default AWS_CONFIG to be used after signing in with Leapp. +# Once logged in, this config file gives you access to all the other teams +# and roles (if you are authorized for access). +ENV AWS_CONFIG_FILE=/etc/aws-config/aws-config-teams +ENV ASSUME_ROLE_INTERACTIVE_QUERY=${NAMESPACE}${TENANT:+-$TENANT}-gbl- + +WORKDIR / diff --git a/examples/snippets/stacks/workflows/addons.yaml b/examples/snippets/stacks/workflows/addons.yaml index 66a8d947b..1f46f57aa 100644 --- a/examples/snippets/stacks/workflows/addons.yaml +++ b/examples/snippets/stacks/workflows/addons.yaml @@ -9,37 +9,39 @@ workflows: description: Vendor addon components. steps: - command: workflow vendor -f addons/alb + - command: workflow vendor -f addons/api-gateway - command: workflow vendor -f addons/cognito + - command: workflow vendor -f addons/ec2-instance + - command: workflow vendor -f addons/kinesis-stream - command: workflow vendor -f addons/kms - command: workflow vendor -f addons/lambda + - command: workflow vendor -f addons/memorydb - command: workflow vendor -f addons/s3-bucket + - command: workflow vendor -f addons/ses - command: workflow vendor -f addons/sns-topic + - command: workflow vendor -f addons/spa-s3-cloudfront - command: workflow vendor -f addons/sqs-queue - command: workflow vendor -f addons/ssm-parameters - command: workflow vendor -f addons/waf - - command: workflow vendor -f addons/api-gateway - - command: workflow vendor -f addons/kinesis-stream - - command: workflow vendor -f addons/ses - - command: workflow vendor -f addons/spa-s3-cloudfront - - command: workflow vendor -f addons/ec2-instance deploy/all: description: run all deploy workflows steps: - command: workflow deploy/alb -f addons + - command: workflow deploy/api-gateway -f addons - command: workflow deploy/cognito -f addons + - command: workflow deploy/ec2-instance -f addons + - command: workflow deploy/kinesis-stream -f addons - command: workflow deploy/kms -f addons - command: workflow deploy/lambda -f addons + - command: workflow deploy/memorydb -f addons - command: workflow deploy/s3-bucket -f addons + - command: workflow deploy/ses -f addons - command: workflow deploy/sns-topic -f addons + - command: workflow deploy/spa-s3-cloudfront -f addons - command: workflow deploy/sqs-queue -f addons - command: workflow deploy/ssm-parameters -f addons - command: workflow deploy/waf -f addons - - command: workflow deploy/api-gateway -f addons - - command: workflow deploy/kinesis-stream -f addons - - command: workflow deploy/ses -f addons - - command: workflow deploy/spa-s3-cloudfront -f addons - 
- command: workflow deploy/ec2-instance -f addons deploy/alb: description: >- @@ -47,12 +49,30 @@ workflows: Please modify the stacks/workflows/addons/alb.yaml to customize steps: - command: workflow all -f addons/alb + deploy/api-gateway: + description: >- + Deploy Api-Gateway Component. + Please modify the stacks/workflows/addons/api-gateway.yaml to customize + steps: + - command: workflow all -f addons/api-gateway deploy/cognito: description: >- Deploy Cognito Component. Please modify the stacks/workflows/addons/cognito.yaml to customize steps: - command: workflow all -f addons/cognito + deploy/ec2-instance: + description: >- + Deploy Ec2-Instance Component. + Please modify the stacks/workflows/addons/ec2-instance.yaml to customize + steps: + - command: workflow all -f addons/ec2-instance + deploy/kinesis-stream: + description: >- + Deploy Kinesis-Stream Component. + Please modify the stacks/workflows/addons/kinesis-stream.yaml to customize + steps: + - command: workflow all -f addons/kinesis-stream deploy/kms: description: >- Deploy Kms Component. @@ -65,18 +85,36 @@ workflows: Please modify the stacks/workflows/addons/lambda.yaml to customize steps: - command: workflow all -f addons/lambda + deploy/memorydb: + description: >- + Deploy Memorydb Component. + Please modify the stacks/workflows/addons/memorydb.yaml to customize + steps: + - command: workflow all -f addons/memorydb deploy/s3-bucket: description: >- Deploy S3-Bucket Component. Please modify the stacks/workflows/addons/s3-bucket.yaml to customize steps: - command: workflow all -f addons/s3-bucket + deploy/ses: + description: >- + Deploy Ses Component. + Please modify the stacks/workflows/addons/ses.yaml to customize + steps: + - command: workflow all -f addons/ses deploy/sns-topic: description: >- Deploy Sns-Topic Component. Please modify the stacks/workflows/addons/sns-topic.yaml to customize steps: - command: workflow all -f addons/sns-topic + deploy/spa-s3-cloudfront: + description: >- + Deploy Spa-S3-Cloudfront Component. + Please modify the stacks/workflows/addons/spa-s3-cloudfront.yaml to customize + steps: + - command: workflow all -f addons/spa-s3-cloudfront deploy/sqs-queue: description: >- Deploy Sqs-Queue Component. @@ -95,33 +133,3 @@ workflows: Please modify the stacks/workflows/addons/waf.yaml to customize steps: - command: workflow all -f addons/waf - deploy/api-gateway: - description: >- - Deploy Api-Gateway Component. - Please modify the stacks/workflows/addons/api-gateway.yaml to customize - steps: - - command: workflow all -f addons/api-gateway - deploy/kinesis-stream: - description: >- - Deploy Kinesis-Stream Component. - Please modify the stacks/workflows/addons/kinesis-stream.yaml to customize - steps: - - command: workflow all -f addons/kinesis-stream - deploy/ses: - description: >- - Deploy Ses Component. - Please modify the stacks/workflows/addons/ses.yaml to customize - steps: - - command: workflow all -f addons/ses - deploy/spa-s3-cloudfront: - description: >- - Deploy Spa-S3-Cloudfront Component. - Please modify the stacks/workflows/addons/spa-s3-cloudfront.yaml to customize - steps: - - command: workflow all -f addons/spa-s3-cloudfront - deploy/ec2-instance: - description: >- - Deploy Ec2-Instance Component. 
- Please modify the stacks/workflows/addons/ec2-instance.yaml to customize - steps: - - command: workflow all -f addons/ec2-instance diff --git a/examples/snippets/stacks/workflows/addons/memorydb.yaml b/examples/snippets/stacks/workflows/addons/memorydb.yaml new file mode 100644 index 000000000..1ec2d9f7d --- /dev/null +++ b/examples/snippets/stacks/workflows/addons/memorydb.yaml @@ -0,0 +1,10 @@ +workflows: + all: + description: run all workflows + steps: + - command: terraform deploy memorydb --stack plat-use1-sandbox + + vendor: + description: vendor all components required for this addon + steps: + - command: vendor pull --component memorydb diff --git a/examples/snippets/stacks/workflows/github.yaml b/examples/snippets/stacks/workflows/github.yaml index 9b93bb85f..0847f21e4 100644 --- a/examples/snippets/stacks/workflows/github.yaml +++ b/examples/snippets/stacks/workflows/github.yaml @@ -1,20 +1,17 @@ workflows: - all: + vendor/arc: description: | - This workflow vendors all Github Action related components to the latest provided version + This workflow vendors all EKS/ARC related components to the latest provided version steps: - - command: workflow vendor -f github - - command: workflow deploy/github-oidc-provider -f github - - command: workflow deploy/github-runners -f github - - vendor: + - command: vendor pull --component github-oidc-provider + - command: vendor pull --component eks/actions-runner-controller + vendor/philips-labs: description: | - This workflow vendors all Github Action related components to the latest provided version + This workflow vendors all Philips Labs self-hosted runner related components to the latest provided version steps: - - command: vendor pull --component eks/actions-runner-controller - - command: vendor pull --component philips-labs-github-runners - command: vendor pull --component github-oidc-provider + - command: vendor pull --component philips-labs-github-runners deploy/github-oidc-provider: description: | @@ -28,19 +25,18 @@ workflows: - command: terraform deploy github-oidc-provider -s plat-gbl-staging - command: terraform deploy github-oidc-provider -s plat-gbl-prod - deploy/github-runners: + deploy/arc-github-runners: description: | - This workflow deploys the github runners + This workflow deploys the EKS/ARC GitHub self-hosted runners steps: - command: terraform deploy iam-service-linked-roles -s core-gbl-auto - command: workflow deploy/cluster -s core-use1-auto -f eks - command: workflow deploy/resources -s core-use1-auto -f eks - command: terraform deploy eks/actions-runner-controller -s core-use1-auto - - command: workflow deploy/pl-github-runners -f github - deploy/pl-github-runners: + upload/pl-secrets: description: | - This workflow deploys the github runners from Philips Labs (component philips-labs-github-runners) + This workflow uploads the secrets for the Philips Labs self-hosted runners steps: - type: shell command: |- @@ -87,4 +83,9 @@ workflows: exit 0 fi AWS_PROFILE=acme-core-gbl-auto-admin chamber write pl-github-runners id "$APP_ID" + + deploy/pl-github-runners: + description: | + This workflow deploys the github runners from Philips Labs (component philips-labs-github-runners) + steps: - command: terraform deploy philips-labs-github-runners -s core-use1-auto \ No newline at end of file diff --git a/examples/snippets/stacks/workflows/grafana.yaml b/examples/snippets/stacks/workflows/grafana.yaml index 09e9ab41e..379af9ce7 100644 --- a/examples/snippets/stacks/workflows/grafana.yaml +++ 
b/examples/snippets/stacks/workflows/grafana.yaml @@ -30,7 +30,7 @@ workflows: command: |- echo "Now update the eks/cluster map_additional_iam_roles and reapply eks/cluster" - deploy/data-sources: + deploy/grafana: description: deploys centralized Grafana and all sub components steps: - command: terraform deploy grafana -s core-use1-auto diff --git a/package-lock.json b/package-lock.json index 3f666001f..8d20ec124 100644 --- a/package-lock.json +++ b/package-lock.json @@ -6428,8 +6428,8 @@ "license": "MIT" }, "node_modules/custom-loaders": { - "resolved": "plugins/custom-loaders", - "link": true + "version": "0.0.0", + "resolved": "file:plugins/custom-loaders" }, "node_modules/cytoscape": { "version": "3.30.1", @@ -18860,9 +18860,6 @@ "type": "github", "url": "https://github.com/sponsors/wooorm" } - }, - "plugins/custom-loaders": { - "version": "0.0.0" } } } diff --git a/plugins/custom-loaders/index.js b/plugins/custom-loaders/index.js index 2206123b6..1240810f6 100644 --- a/plugins/custom-loaders/index.js +++ b/plugins/custom-loaders/index.js @@ -8,41 +8,40 @@ const html = require('html-loader'); const path = require('path'); module.exports = function (context, options) { - return { - name: 'custom-loaders', - configureWebpack(config, isServer) { - return { - /*output: { - filename: 'custom-loaders-webpack.bundle.js', - },*/ - - module: { - rules: [ - // { test: /\.txt$/, use: 'raw-loader' }, - // https://webpack.js.org/loaders/html-loader/ - { - test: /\.(html|htm)$/i, - loader: "html-loader", - options: { - minimize: { - removeComments: false, - collapseWhitespace: false, - }, + return { + name: 'custom-loaders', + configureWebpack(config, isServer) { + return { + module: { + rules: [ + // Existing rule for HTML files + { + test: /\.(html|htm)$/i, + loader: 'html-loader', + options: { + minimize: { + removeComments: false, + collapseWhitespace: false, }, }, - { - test: /\.(txt|yml|yaml|tf)$/, - use: 'raw-loader' - } - ], - }, + }, + { + test: /\.(txt|yml|yaml|tf)$/i, + use: 'raw-loader', + }, + { + test: /(?:Dockerfile|Makefile)$/i, + use: 'raw-loader', + }, + ], + }, - resolve: { - alias: { - '@examples': path.resolve(__dirname, 'examples'), - } - } - }; - }, - }; + resolve: { + alias: { + '@examples': path.resolve(__dirname, 'examples'), + }, + }, + }; + }, }; +}; diff --git a/plugins/dynamicRedirects/index.js b/plugins/dynamicRedirects/index.js new file mode 100644 index 000000000..35b01bb37 --- /dev/null +++ b/plugins/dynamicRedirects/index.js @@ -0,0 +1,71 @@ +/* + * This plugin is not working yet! + * We have these redirects created with a static file in the staticRedirects plugin. + * We'll keep this file as reference for future improvements. + */ + +async function loadRedirects(allContent) { + const redirects = []; + + // Check if 'docusaurus-plugin-content-docs' is available and has the expected structure + const docsPlugin = allContent['docusaurus-plugin-content-docs']; + + // Check that docsPlugin and its required properties exist + if (!docsPlugin || !docsPlugin.default || !docsPlugin.default.loadedVersions) { + console.error("Docs plugin content is not available or improperly structured. 
Skipping redirects generation."); + return redirects; // Return empty array if the docs content is not available + } + + // Access the docs from the first loaded version + const docs = docsPlugin.default.loadedVersions[0]?.docs || []; + + // Iterate over docs to create redirects based on refarch_id + docs.forEach((doc) => { + if (doc.frontMatter?.refarch_id) { + redirects.push( + { + from: `/reference-architecture/${doc.frontMatter.refarch_id}`, + to: doc.permalink, + }, + { + from: `/${doc.frontMatter.refarch_id}`, + to: doc.permalink, + } + ); + } + }); + + // console.debug('Loaded redirects:', redirects); + return redirects; +} + +function redirectsPlugin(context, options) { + return { + name: 'redirects-plugin', + + // Since the loadContent lifecycle method is synchronous, let's adjust this + async loadContent() { + // Returning an empty object as loadContent does not receive allContent directly + return {}; + }, + + async allContentLoaded({ actions, allContent }) { + const { setGlobalData } = actions; + + // Load redirects using the custom loadRedirects function + const redirects = await loadRedirects(allContent); + + // Set redirects in global data + setGlobalData({ + redirects, + }); + + // console.debug('Global data set with redirects.'); + }, + }; +} + +// Export the plugin +module.exports = { + redirectsPlugin, +}; diff --git a/plugins/staticRedirects/index.js b/plugins/staticRedirects/index.js new file mode 100644 index 000000000..a24e4539b --- /dev/null +++ b/plugins/staticRedirects/index.js @@ -0,0 +1,17 @@ +const fs = require('fs'); +const path = require('path'); + +function getStaticRedirects() { + // Load redirects directly within this module + const docsRedirects = JSON.parse(fs.readFileSync(path.resolve(__dirname, 'redirects/docs.json'), 'utf-8')); + const legacyRedirects = JSON.parse(fs.readFileSync(path.resolve(__dirname, 'redirects/legacy_setup_docs.json'), 'utf-8')); + const refarchRedirects = JSON.parse(fs.readFileSync(path.resolve(__dirname, 'redirects/refarch.json'), 'utf-8')); + + // Combine the loaded redirects into a single array + return [...docsRedirects, ...legacyRedirects, ...refarchRedirects]; +} + +// Export the plugin and helper functions +module.exports = { + getStaticRedirects, +}; diff --git a/plugins/staticRedirects/redirects/docs.json b/plugins/staticRedirects/redirects/docs.json new file mode 100644 index 000000000..d8eb0ba62 --- /dev/null +++ b/plugins/staticRedirects/redirects/docs.json @@ -0,0 +1,6 @@ +[ + { + "from": "/reference-architecture", + "to": "/learn" + } +] diff --git a/plugins/staticRedirects/redirects/legacy_setup_docs.json b/plugins/staticRedirects/redirects/legacy_setup_docs.json new file mode 100644 index 000000000..1c1b63758 --- /dev/null +++ b/plugins/staticRedirects/redirects/legacy_setup_docs.json @@ -0,0 +1,98 @@ +[ + { + "from": "/reference-architecture/design-decisions/foundational-release-engineering/decide-on-self-hosted-github-runner-strategy", + "to": "/layers/software-delivery/design-decisions/decide-on-self-hosted-github-runner-strategy" + }, + { + "from": "/reference-architecture/fundamentals", + "to": "/learn/prerequisites" + }, + { + "from": "/reference-architecture/fundamentals", + "to": "/learn/prerequisites" + }, + { + "from": "/reference-architecture/fundamentals/ecs", + "to": "/layers/software-delivery/ecs-ecspresso" + }, + { + "from": "/reference-architecture/fundamentals/gitops", + "to": "/layers/gitops" + }, + { + "from": "/reference-architecture/how-to-guides/integrations/opsgenie", + "to": 
"/layers/alerting/opsgenie" + }, + { + "from": "/reference-architecture/how-to-guides/integrations/opsgenie/how-to-create-new-teams-in-opsgenie", + "to": "/layers/alerting/opsgenie/how-to-create-new-teams-in-opsgenie" + }, + { + "from": "/reference-architecture/how-to-guides/integrations/opsgenie/how-to-sign-up-for-opsgenie", + "to": "/layers/alerting/opsgenie/how-to-sign-up-for-opsgenie" + }, + { + "from": "/reference-architecture/how-to-guides/integrations/spacelift", + "to": "/layers/spacelift" + }, + { + "from": "/reference-architecture/how-to-guides/tutorials/how-to-setup-grafana", + "to": "/layers/monitoring/grafana" + }, + { + "from": "/reference-architecture/quickstart/eks", + "to": "/layers/eks" + }, + { + "from": "/reference-architecture/quickstart/iam-identity", + "to": "/layers/identity" + }, + { + "from": "/reference-architecture/reference/adrs/jumpstart/decide-on-vanity-domain", + "to": "/layers/network/design-decisions/decide-on-vanity-branded-domain" + }, + { + "from": "/reference-architecture/setup/cold-start", + "to": "/layers/accounts/deploy-accounts" + }, + { + "from": "/reference-architecture/setup/cold-start", + "to": "/layers/accounts/deploy-accounts" + }, + { + "from": "/reference-architecture/setup/cold-start/automated-configuration", + "to": "/layers/accounts/prepare-aws-organization" + }, + { + "from": "/reference-architecture/setup/cold-start/how-to-create-superadmin-user", + "to": "/layers/accounts/tutorials/how-to-create-superadmin-user" + }, + { + "from": "/reference-architecture/setup/cold-start/manual-configuration", + "to": "/layers/accounts/tutorials/manual-configuration" + }, + { + "from": "/reference-architecture/setup/eks", + "to": "/learn/maintenance/upgrades/how-to-upgrade-eks" + }, + { + "from": "/reference-architecture/setup/github-arc", + "to": "/layers/github-actions/eks-github-actions-controller" + }, + { + "from": "/reference-architecture/setup/github-runners", + "to": "/layers/github-actions" + }, + { + "from": "/reference-architecture/setup/identity", + "to": "/layers/identity" + }, + { + "from": "/reference-architecture/setup/network", + "to": "/layers/network/connect-network" + }, + { + "from": "/reference-architecture/setup/philips-labs-github-runners", + "to": "/layers/github-actions/philips-labs-github-runners" + } +] diff --git a/plugins/staticRedirects/redirects/refarch.json b/plugins/staticRedirects/redirects/refarch.json new file mode 100644 index 000000000..5c4d78bb0 --- /dev/null +++ b/plugins/staticRedirects/redirects/refarch.json @@ -0,0 +1,210 @@ +[ + { + "from": "/reference-architecture/REFARCH-34", + "to": "/layers/project/design-decisions/decide-on-1password-strategy" + }, + { + "from": "/REFARCH-34", + "to": "/layers/project/design-decisions/decide-on-1password-strategy" + }, + { + "from": "/reference-architecture/REFARCH-46", + "to": "/layers/network/design-decisions/decide-on-service-discovery-domain" + }, + { + "from": "/REFARCH-46", + "to": "/layers/network/design-decisions/decide-on-service-discovery-domain" + }, + { + "from": "/reference-architecture/REFARCH-49", + "to": "/layers/eks/design-decisions/decide-on-host-os-flavor-for-eks" + }, + { + "from": "/REFARCH-49", + "to": "/layers/eks/design-decisions/decide-on-host-os-flavor-for-eks" + }, + { + "from": "/reference-architecture/REFARCH-50", + "to": "/layers/accounts/design-decisions/decide-on-mfa-solution-for-aws-root-accounts" + }, + { + "from": "/REFARCH-50", + "to": "/layers/accounts/design-decisions/decide-on-mfa-solution-for-aws-root-accounts" + }, + { + 
"from": "/reference-architecture/REFARCH-51", + "to": "/layers/accounts/design-decisions/decide-on-email-address-format-for-aws-accounts" + }, + { + "from": "/REFARCH-51", + "to": "/layers/accounts/design-decisions/decide-on-email-address-format-for-aws-accounts" + }, + { + "from": "/reference-architecture/REFARCH-52", + "to": "/layers/project/design-decisions/decide-on-infrastructure-repository-name" + }, + { + "from": "/REFARCH-52", + "to": "/layers/project/design-decisions/decide-on-infrastructure-repository-name" + }, + { + "from": "/reference-architecture/REFARCH-53", + "to": "/layers/project/design-decisions/decide-on-namespace-abbreviation" + }, + { + "from": "/REFARCH-53", + "to": "/layers/project/design-decisions/decide-on-namespace-abbreviation" + }, + { + "from": "/reference-architecture/REFARCH-54", + "to": "/layers/network/design-decisions/decide-on-vanity-branded-domain" + }, + { + "from": "/REFARCH-54", + "to": "/layers/network/design-decisions/decide-on-vanity-branded-domain" + }, + { + "from": "/reference-architecture/REFARCH-55", + "to": "/layers/accounts/design-decisions/decide-on-aws-account-flavors-and-organizational-units" + }, + { + "from": "/REFARCH-55", + "to": "/layers/accounts/design-decisions/decide-on-aws-account-flavors-and-organizational-units" + }, + { + "from": "/reference-architecture/REFARCH-56", + "to": "/layers/network/design-decisions/decide-on-primary-aws-region" + }, + { + "from": "/REFARCH-56", + "to": "/layers/network/design-decisions/decide-on-primary-aws-region" + }, + { + "from": "/reference-architecture/REFARCH-79", + "to": "/resources/legacy/design-decisions/decide-on-transactional-email-smtp-provider-for-operational-email" + }, + { + "from": "/REFARCH-79", + "to": "/resources/legacy/design-decisions/decide-on-transactional-email-smtp-provider-for-operational-email" + }, + { + "from": "/reference-architecture/REFARCH-80", + "to": "/layers/network/design-decisions/decide-vpc-peering-requirements-e-g-to-legacy-env" + }, + { + "from": "/REFARCH-80", + "to": "/layers/network/design-decisions/decide-vpc-peering-requirements-e-g-to-legacy-env" + }, + { + "from": "/reference-architecture/REFARCH-81", + "to": "/layers/project/design-decisions/decide-on-secrets-management-strategy-for-terraform" + }, + { + "from": "/REFARCH-81", + "to": "/layers/project/design-decisions/decide-on-secrets-management-strategy-for-terraform" + }, + { + "from": "/reference-architecture/REFARCH-91", + "to": "/layers/software-delivery/design-decisions/decide-how-to-distribute-docker-images" + }, + { + "from": "/REFARCH-91", + "to": "/layers/software-delivery/design-decisions/decide-how-to-distribute-docker-images" + }, + { + "from": "/reference-architecture/REFARCH-111", + "to": "/layers/monitoring/design-decisions/decide-on-external-monitoring-solution" + }, + { + "from": "/REFARCH-111", + "to": "/layers/monitoring/design-decisions/decide-on-external-monitoring-solution" + }, + { + "from": "/reference-architecture/REFARCH-112", + "to": "/layers/software-delivery/design-decisions/decide-on-maintenance-page-solution" + }, + { + "from": "/REFARCH-112", + "to": "/layers/software-delivery/design-decisions/decide-on-maintenance-page-solution" + }, + { + "from": "/reference-architecture/REFARCH-144", + "to": "/layers/software-delivery/design-decisions/decide-on-database-seeding-strategy-for-ephemeral-preview-enviro" + }, + { + "from": "/REFARCH-144", + "to": "/layers/software-delivery/design-decisions/decide-on-database-seeding-strategy-for-ephemeral-preview-enviro" + }, + { + 
"from": "/reference-architecture/REFARCH-202", + "to": "/layers/eks/design-decisions/decide-on-email-address-for-cert-manager-support-emails" + }, + { + "from": "/REFARCH-202", + "to": "/layers/eks/design-decisions/decide-on-email-address-for-cert-manager-support-emails" + }, + { + "from": "/reference-architecture/REFARCH-207", + "to": "/layers/eks/design-decisions/decide-on-helm-chart-repository-strategy" + }, + { + "from": "/REFARCH-207", + "to": "/layers/eks/design-decisions/decide-on-helm-chart-repository-strategy" + }, + { + "from": "/reference-architecture/REFARCH-208", + "to": "/layers/network/design-decisions/decide-on-hostname-scheme-for-service-discovery" + }, + { + "from": "/REFARCH-208", + "to": "/layers/network/design-decisions/decide-on-hostname-scheme-for-service-discovery" + }, + { + "from": "/reference-architecture/REFARCH-209", + "to": "/layers/project/design-decisions/decide-on-regional-naming-scheme" + }, + { + "from": "/REFARCH-209", + "to": "/layers/project/design-decisions/decide-on-regional-naming-scheme" + }, + { + "from": "/reference-architecture/REFARCH-210", + "to": "/layers/data/design-decisions/decide-whether-to-use-rds-iam-authentication" + }, + { + "from": "/REFARCH-210", + "to": "/layers/data/design-decisions/decide-whether-to-use-rds-iam-authentication" + }, + { + "from": "/reference-architecture/REFARCH-211", + "to": "/layers/data/design-decisions/decide-on-rds-technology-and-architecture" + }, + { + "from": "/REFARCH-211", + "to": "/layers/data/design-decisions/decide-on-rds-technology-and-architecture" + }, + { + "from": "/reference-architecture/REFARCH-217", + "to": "/layers/network/design-decisions/decide-on-aws-account-vpc-subnet-cidr-strategy" + }, + { + "from": "/REFARCH-217", + "to": "/layers/network/design-decisions/decide-on-aws-account-vpc-subnet-cidr-strategy" + }, + { + "from": "/reference-architecture/REFARCH-236", + "to": "/layers/eks/design-decisions/decide-on-eks-node-pool-architecture" + }, + { + "from": "/REFARCH-236", + "to": "/layers/eks/design-decisions/decide-on-eks-node-pool-architecture" + }, + { + "from": "/reference-architecture/REFARCH-240", + "to": "/layers/network/design-decisions/decide-on-organization-supernet-cidr-ranges" + }, + { + "from": "/REFARCH-240", + "to": "/layers/network/design-decisions/decide-on-organization-supernet-cidr-ranges" + } +] diff --git a/src/components/Step/index.js b/src/components/Step/index.js index b9abbbcb3..d9648056b 100644 --- a/src/components/Step/index.js +++ b/src/components/Step/index.js @@ -1,30 +1,16 @@ // src/components/Step.js -import React, { useEffect, useState, createContext, useContext } from 'react'; +import React, { useContext } from 'react'; +import { StepContext } from '@site/src/components/Steps'; import './index.css'; -let stepCounter = 0; - -export const StepContext = createContext(0); - -export const resetStepCounter = () => { - stepCounter = 0; -}; - const Step = ({ title, children }) => { - const [stepNumber, setStepNumber] = useState(stepCounter); - - useEffect(() => { - stepCounter += 1; - setStepNumber(stepCounter); - }, []); + const stepNumber = useContext(StepContext); // Consume the step number from context return ( - -
diff --git a/src/components/Step/index.js b/src/components/Step/index.js
index b9abbbcb3..d9648056b 100644
--- a/src/components/Step/index.js
+++ b/src/components/Step/index.js
@@ -1,30 +1,16 @@
 // src/components/Step.js
-import React, { useEffect, useState, createContext, useContext } from 'react';
+import React, { useContext } from 'react';
+import { StepContext } from '@site/src/components/Steps';
 import './index.css';
 
-let stepCounter = 0;
-
-export const StepContext = createContext(0);
-
-export const resetStepCounter = () => {
-  stepCounter = 0;
-};
-
 const Step = ({ title, children }) => {
-  const [stepNumber, setStepNumber] = useState(stepCounter);
-
-  useEffect(() => {
-    stepCounter += 1;
-    setStepNumber(stepCounter);
-  }, []);
+  const stepNumber = useContext(StepContext); // Consume the step number from context
 
   return (
-    <StepContext.Provider value={stepNumber}>
-      <div className="step">
-        {title &&
-          <h2 className="step-title">
-            {`Step ${stepNumber}: ${title}`}
-          </h2>
-        }
-        <div className="step-content">{children}</div>
-      </div>
-    </StepContext.Provider>
+    <div className="step">
+      {title &&
+        <h2 className="step-title">
+          {`Step ${stepNumber}: ${title}`}
+        </h2>
+      }
+      <div className="step-content">{children}</div>
+    </div>
   );
 };
diff --git a/src/components/StepNumber/index.js b/src/components/StepNumber/index.js
index 719b94933..d9b1e49e4 100644
--- a/src/components/StepNumber/index.js
+++ b/src/components/StepNumber/index.js
@@ -1,11 +1,15 @@
+// src/components/StepNumber.js
 import React, { useContext } from 'react';
-import { StepContext } from '@site/src/components/Step';
+import { StepContext } from '@site/src/components/Steps';
 import clsx from 'clsx';
 import styles from './index.module.css';
 
 const StepNumber = () => {
-  const stepNumber = useContext(StepContext);
-  return (<span className={clsx(styles.stepNumber)}>{`${stepNumber}`}</span>);
+  const stepNumber = useContext(StepContext); // Get the step number from the context
+
+  return (
+    <span className={clsx(styles.stepNumber)}>{`${stepNumber}`}</span>
+  );
 };
 
 export default StepNumber;
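+
+// Illustrative usage in MDX (an assumption; actual docs may differ), where
+// StepNumber renders the number of the enclosing Step:
+//
+//   <Step title="Configure the provider">
+//     This is step <StepNumber/> of the setup.
+//   </Step>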
diff --git a/src/components/Steps/index.js b/src/components/Steps/index.js
index 75d576dc9..04660f0e5 100644
--- a/src/components/Steps/index.js
+++ b/src/components/Steps/index.js
@@ -1,28 +1,24 @@
 // src/components/Steps.js
-import React, { useEffect, useState, createContext, useContext } from 'react';
+import React, { useState } from 'react';
 import clsx from 'clsx';
 import styles from './index.module.css';
 
-let stepCounter = 0;
+export const StepContext = React.createContext();
 
-export const StepContext = createContext(0);
-
-export const resetStepCounter = () => {
-  stepCounter = 0;
-};
-
-const Steps = ({ title, children }) => {
-  const [stepNumber, setStepNumber] = useState(stepCounter);
-
-  useEffect(() => {
-    stepCounter += 1;
-    setStepNumber(stepCounter);
-  }, []);
+const Steps = ({ children }) => {
+  const [stepCounter, setStepCounter] = useState(0);
 
   return (
-    <div className={clsx(styles.steps)}>
-      {children}
-    </div>
+    <div className={clsx(styles.steps)}>
+      {React.Children.map(children, (child, index) => {
+        // Provide a unique step number for each child
+        return (
+          <StepContext.Provider key={index} value={index + 1}>
+            {child}
+          </StepContext.Provider>
+        );
+      })}
+    </div>
   );
 };
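+
+// Illustrative usage in MDX: each direct child of Steps receives its own
+// step number via StepContext, so numbering no longer depends on a shared
+// module-level counter:
+//
+//   <Steps>
+//     <Step title="Generate credentials">...</Step>
+//     <Step title="Deploy the service">...</Step>
+//   </Steps>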
diff --git a/src/css/custom.css b/src/css/custom.css
index 16a7fb431..283063c91 100644
--- a/src/css/custom.css
+++ b/src/css/custom.css
@@ -236,7 +236,7 @@ img.center {
   display: block;
 }
 
-.img-small {
+img.small {
   max-width: 100%;
 }
 
diff --git a/src/theme/DocItem/Layout/index.js b/src/theme/DocItem/Layout/index.js
index a3f6452f3..a49944037 100644
--- a/src/theme/DocItem/Layout/index.js
+++ b/src/theme/DocItem/Layout/index.js
@@ -1,10 +1,9 @@
 import React, { useEffect } from 'react';
 import Layout from '@theme-original/DocItem/Layout';
-import { resetStepCounter } from '@site/src/components/Step';
 
 export default function LayoutWrapper(props) {
   useEffect(() => {
-    resetStepCounter(); // Reset the counter whenever the layout is rendered
+
   }, []);
 
   return (
diff --git a/static/assets/ecs-partial-task-definitions.png b/static/assets/ecs-partial-task-definitions.png
new file mode 100644
index 000000000..cbdfeef95
Binary files /dev/null and b/static/assets/ecs-partial-task-definitions.png differ