diff --git a/404.html b/404.html index 891b37e8..d7f8f9b8 100644 --- a/404.html +++ b/404.html @@ -3966,6 +3966,8 @@ + + @@ -4147,6 +4149,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/concepts/index.html b/concepts/index.html index 29d8652b..568bc1bd 100644 --- a/concepts/index.html +++ b/concepts/index.html @@ -4037,6 +4037,8 @@ + + @@ -4218,6 +4220,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/concepts/next-steps/index.html b/concepts/next-steps/index.html index 94584fcb..3800adf6 100644 --- a/concepts/next-steps/index.html +++ b/concepts/next-steps/index.html @@ -4028,6 +4028,8 @@ + + @@ -4209,6 +4211,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/concepts/our-tech-stack/index.html b/concepts/our-tech-stack/index.html index 646e7c61..699482d2 100644 --- a/concepts/our-tech-stack/index.html +++ b/concepts/our-tech-stack/index.html @@ -4028,6 +4028,8 @@ + + @@ -4209,6 +4211,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/concepts/what-is-leverage/index.html b/concepts/what-is-leverage/index.html index 5a2e147d..f4ac014c 100644 --- a/concepts/what-is-leverage/index.html +++ b/concepts/what-is-leverage/index.html @@ -4035,6 +4035,8 @@ + + @@ -4216,6 +4218,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/concepts/what-leverage-can-do-for-you/index.html b/concepts/what-leverage-can-do-for-you/index.html index 7b7c5288..f5f3ce08 100644 --- a/concepts/what-leverage-can-do-for-you/index.html +++ b/concepts/what-leverage-can-do-for-you/index.html @@ -4035,6 +4035,8 @@ + + @@ -4216,6 +4218,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/concepts/why-leverage/index.html b/concepts/why-leverage/index.html index 57d42262..d44681d0 100644 --- a/concepts/why-leverage/index.html +++ b/concepts/why-leverage/index.html @@ -4048,6 +4048,8 @@ + + @@ -4229,6 +4231,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/es/bienvenido/index.html b/es/bienvenido/index.html index 8d8db733..e0aa5f61 100644 --- a/es/bienvenido/index.html +++ b/es/bienvenido/index.html @@ -4029,6 +4029,8 @@ + + @@ -4210,6 +4212,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/es/index.html b/es/index.html index 11b15962..3c1ded4f 100644 --- a/es/index.html +++ b/es/index.html @@ -6307,6 +6307,8 @@ + + @@ -6488,6 +6490,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/how-it-works/ref-architecture/considerations/index.html b/how-it-works/ref-architecture/considerations/index.html index 99827975..28f1545e 100644 --- a/how-it-works/ref-architecture/considerations/index.html +++ b/how-it-works/ref-architecture/considerations/index.html @@ -3973,6 +3973,8 @@ + + @@ -4154,6 +4156,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/how-it-works/ref-architecture/index.html b/how-it-works/ref-architecture/index.html index a366b080..9c4722ba 100644 --- a/how-it-works/ref-architecture/index.html +++ b/how-it-works/ref-architecture/index.html @@ -4028,6 +4028,8 @@ + + @@ -4209,6 +4211,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/index.html b/index.html index b3895083..3bb18604 100644 --- a/index.html +++ b/index.html @@ -6313,6 +6313,8 @@ + + @@ -6494,6 +6496,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/license/index.html b/license/index.html index 61d94377..49c3ecfa 100644 --- a/license/index.html +++ b/license/index.html @@ -3991,6 +3991,8 @@ + + @@ -4172,6 +4174,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/search/search_index.json b/search/search_index.json index 11c6824a..12237778 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"license/","title":"License","text":"

    MIT License

    Copyright \u00a9 2017 - 2020 binbash

    Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

    "},{"location":"concepts/","title":"Index","text":""},{"location":"concepts/#concepts","title":"Concepts","text":""},{"location":"concepts/#welcome","title":"Welcome!","text":"

    Welcome to Leverage's documentation! Here you will find the concepts you need to understand to work with our stack, the steps to try Leverage by yourself, and the extensive documentation about every aspect of our solution.

    "},{"location":"concepts/#getting-started","title":"Getting Started","text":"

    Feel free to explore the following pages to know more about Leverage.

    "},{"location":"concepts/next-steps/","title":"Next Steps","text":"

    Now that you know the basic concepts about Leverage, feel free to give it a try or check out the User Guide section to go deeper into the implementation details. Links down below:

    "},{"location":"concepts/next-steps/#learn-more","title":"Learn More","text":"

    See Try Leverage to take the tutorial that will help you deploy a basic AWS Landing Zone via Leverage.

    See User Guide to take the comprehensive route to learn more about Leverage.

    See Work with us if you want to join us or know more about the team behind Leverage.

    "},{"location":"concepts/our-tech-stack/","title":"Our Tech Stack","text":"

    Leverage was built around the AWS Well-Architected Framework and it uses a stack that includes Terraform, Ansible, Helm and other tools.

    We are also adopters and supporters of Kubernetes and the Cloud Native movement, which should become self-evident as you keep exploring our technology stack.

    "},{"location":"concepts/our-tech-stack/#why-did-we-choose-our-tech-stack","title":"Why did we choose our tech stack?","text":"Why AWS\u2753

    Amazon Web Services (AWS) is the world\u2019s most comprehensive and broadly adopted cloud platform, offering over 200 fully featured services from data centers globally. Millions of customers\u2014including the fastest-growing startups, largest enterprises, and leading government agencies\u2014are using AWS to lower costs, become more agile, and innovate faster.

    Build, deploy, and manage websites, apps or processes on AWS' secure, reliable network. AWS offers secure, reliable and scalable services, HIPAA compliance, easy cluster management, global infrastructure and high scalability.

    Read More: What is AWS

    Why WAF (Well Architected Framework)\u2753

    AWS Well-Architected helps cloud architects to build secure, high-performing, resilient, and efficient infrastructure for their applications and workloads. Based on five pillars \u2014 operational excellence, security, reliability, performance efficiency, and cost optimization \u2014 AWS Well-Architected provides a consistent approach for customers and partners to evaluate architectures, and implement designs that can scale over time.

    Read More: AWS Well-architected

    Why Infra as Code (IaC) & Terraform\u2753

    Terraform allows you to codify your application infrastructure, reduce human error and increase automation by provisioning infrastructure as code. With Terraform we can manage infrastructure across clouds, provisioning infrastructure across 300+ public clouds and services using a single workflow. Moreover, it helps to create reproducible infrastructure and provision consistent testing, staging and production environments with the same configuration.

    Terraform has everything we expect from an IaC framework: it is an open source, cloud-agnostic provisioning tool that supports immutable infrastructure, a declarative language and a client-only architecture.

    Read More

    Why Organizations\u2753

    AWS Organizations helps you centrally manage and govern your environment as you grow and scale your AWS resources. Using AWS Organizations, you can programmatically create new AWS accounts and allocate resources, group accounts to organize your workflows, apply policies to accounts or groups for governance, and simplify billing by using a single payment method for all of your accounts.

    Read More

    Why IAM and roles\u2753

    AWS Identity and Access Management (IAM) enables you to manage access to AWS services and resources securely. Using IAM, you can create and manage AWS users and groups, and use permissions to allow and deny their access to AWS resources.

    Read More

    Security | Why Web Application Firewall (WAF), CloudTrail, Config, GuardDuty\u2753

    Raise your security posture with AWS infrastructure and services. Using AWS, you will gain the control and confidence you need to securely run your business with the most flexible and secure cloud computing environment available today. As an AWS customer, you will benefit from AWS data centers and a network architected to protect your information, identities, applications, and devices. With AWS, you can improve your ability to meet core security and compliance requirements, such as data locality, protection, and confidentiality with our comprehensive services and features.

    Read More

    Why VPC\u2753

    Amazon Virtual Private Cloud (Amazon VPC) is a service that lets you launch AWS resources in a logically isolated virtual network that you define. You have complete control over your virtual networking environment, including selection of your own IP address range, creation of subnets, and configuration of route tables and network gateways. You can use both IPv4 and IPv6 for most resources in your virtual private cloud, helping to ensure secure and easy access to resources and applications.

    Read More

    Why Kubernetes (K8s) & AWS EKS\u2753

    Kubernetes, also known as K8s, is an open-source system for automating deployment, scaling, and management of containerized applications. It groups containers that make up an application into logical units for easy management and discovery. Kubernetes builds upon 15 years of experience of running production workloads at Google, combined with best-of-breed ideas and practices from the community.

    Amazon Elastic Kubernetes Service (Amazon EKS) gives you the flexibility to start, run, and scale Kubernetes applications in the AWS cloud or on-premises. Amazon EKS helps you provide highly-available and secure clusters and automates key tasks such as patching, node provisioning, and updates. Customers such as Intel, Snap, Intuit, GoDaddy, and Autodesk trust EKS to run their most sensitive and mission critical applications.

    EKS runs upstream Kubernetes and is certified Kubernetes conformant for a predictable experience. You can easily migrate any standard Kubernetes application to EKS without needing to refactor your code.

    Read More

    Why S3\u2753

    Amazon Simple Storage Service (Amazon S3) is an object storage service that offers industry-leading scalability, data availability, security, and performance. This means customers of all sizes and industries can use it to store and protect any amount of data for a range of use cases, such as data lakes, websites, mobile applications, backup and restore, archive, enterprise applications, IoT devices, and big data analytics. Amazon S3 provides easy-to-use management features so you can organize your data and configure finely-tuned access controls to meet your specific business, organizational, and compliance requirements. Amazon S3 is designed for 99.999999999% (11 9's) of durability, and stores data for millions of applications for companies all around the world.

    Read More

    Why RDS\u2753

    Amazon Relational Database Service (Amazon RDS) makes it easy to set up, operate, and scale a relational database in the cloud. It provides cost-efficient and resizable capacity while automating time-consuming administration tasks such as hardware provisioning, database setup, patching and backups. It frees you to focus on your applications so you can give them the fast performance, high availability, security and compatibility they need.

    Amazon RDS is available on several database instance types - optimized for memory, performance or I/O - and provides you with six familiar database engines to choose from, including Amazon Aurora, PostgreSQL, MySQL, MariaDB, Oracle Database, and SQL Server. You can use the AWS Database Migration Service to easily migrate or replicate your existing databases to Amazon RDS.

    Read More

    Why Hashicorp Vault\u2753

    As many organizations migrate to the public cloud, a major concern has been how to best secure data, preventing it from unauthorized access or exfiltration.

    Deploying a product like HashiCorp Vault gives you better control of your sensitive credentials and helps you meet cloud security standards.

    HashiCorp Vault is designed to help organizations manage access to secrets and transmit them safely within an organization. Secrets are defined as any form of sensitive credentials that need to be tightly controlled and monitored and can be used to unlock sensitive information. Secrets could be in the form of passwords, API keys, SSH keys, RSA tokens, or OTP.

    HashiCorp Vault makes it very easy to control and manage access by providing you with a unified interface to manage every secret in your infrastructure. Not only that, you can also create detailed audit logs and keep track of who accessed what.

    Manage Secrets and Protect Sensitive Data. Secure, store and tightly control access to tokens, passwords, certificates, encryption keys for protecting secrets and other sensitive data using a UI, CLI, or HTTP API.

    Read More

    "},{"location":"concepts/what-is-leverage/","title":"What is Leverage?","text":"

    Leverage was made out of a significant amount of knowledge, acquired through several years of experience, turned into an ecosystem of code, tools, and workflows that enables you to build the AWS infrastructure for your applications and services quickly and securely.

    Since all the code and modules are already built, we can get you up and running up to 10x faster than a consulting company -- typically in just a few weeks! -- and on top of code that is thoroughly documented, tested, and has been proven in production at dozens of other project deployments.

    "},{"location":"concepts/what-is-leverage/#core-components","title":"Core Components","text":"

    Our focus is on creating reusable, high quality Cloud Infrastructure code, through our core components:

    "},{"location":"concepts/what-is-leverage/#video-presentation","title":"Video Presentation","text":"

    Check out this intro video that explains what Leverage is in less than 5 minutes:

    "},{"location":"concepts/what-leverage-can-do-for-you/","title":"What can Leverage do for you?","text":"

    Still not convinced? Check out the following sections, which describe what Leverage can bring to the table depending on your role in a company.

    "},{"location":"concepts/what-leverage-can-do-for-you/#leverage-for-cios-ctos-and-vps-of-engineering","title":"Leverage for CIOs, CTOs and VPs of Engineering","text":"Accelerate development and optimize costs

    Annual cost savings are a new standard and best practice. Profits are being targeted at business development, regulatory and compliance needs, resulting in reduced pressure on IT and development budgets and granting the opportunity to focus on new features and boost innovation.

    Modernize applications architecture (loosely coupled and modular)

    Strategically decompose the monolith into a fine-grained, loosely coupled modular architecture to increase both development and business agility. When the system architecture is designed to allow teams to test, deploy and change systems without relying on other teams, they require little communication to get the job done. In other words, both the architecture and the teams are loosely coupled.

    Innovation - Rapidly adopt new technologies and reduce development time

    Use the Leverage Reference Architecture for AWS together with our libraries to get a collection of cloud application architecture components to build and deploy faster in the cloud. Building a cloud Landing Zone is complex, especially since most companies have little or no expertise in this area, and it can take a significant amount of time to get it right. Leverage a reference architecture that gives you an AWS Landing Zone providing a consistent and solid \"foundation\" to bootstrap your project in the cloud. The code solution implements the best AWS Well-Architected Framework practices as well as the battle-tested tech experience and years of knowledge of our contributors.

    Hours or days, not weeks or months

    Leverage implements infrastructure as code at all times. We have rolled this out using Terraform, and it has been fully proven in AWS and other Terraform providers that are part of our reference architecture, like Kubernetes, Helm and HashiCorp Vault. By using the Leverage CLI, our binary will help you quickly bootstrap your AWS Landing Zone in a matter of hours (or at most a few days).

    It's not just a pile of scripts

    It's not just another layer of untested, one-time, stand-alone scripts. The code is modularized and well designed under best practices; our Leverage CLI has both unit and integration tests, while our Terraform code has been extensively E2E tested. Moreover, 100% of the code is yours (to modify, extend, reuse, etc.), with no vendor lock-in and no vendor licensing fees. We use the MIT license, so you can take the code, modify it and use it as your private code. All we ask in return is a friendly greeting and that (if possible) you consider contributing to the binbash Leverage project. Implement Leverage yourself or we can deploy it for you!

    DevOps culture and methodologies

    Team agility and continuous improvement based on feedback loops are some of the main drivers of cloud adoption, and IaC's role in streamlining the deployment of both infrastructure and applications is one of the most important aspects of DevOps practices. We continue to apply these methodologies to achieve a DevOps-first culture. We have experienced and demonstrated their potential and have practiced them in dozens of projects over the past 5 years. The Leverage reference architecture for AWS combines a set of application best practices, technology patterns and a common CI/CD deployment approach through the Leverage CLI for all your application environments. As a result, we are pursuing world-class software delivery performance through optimized collaboration, communication, reliability, stability, scalability and security at ever-decreasing cost and effort.

    Repeatable, composable and extensible immutable infrastructure

    The best high-performance development teams create and recreate their development and production environments using infrastructure as code (IaC) as part of their daily development processes. The Leverage CLI allows you to build repeatable and immutable infrastructure, so your cloud development, staging and production environments will consistently be the same.

    "},{"location":"concepts/what-leverage-can-do-for-you/#leverage-for-devops-engineers-cloud-architects-and-software-engineers","title":"Leverage for DevOps Engineers, Cloud Architects and Software Engineers","text":"Provisioning infrastructure as code (Iac)

    Instead of manually provisioning infrastructure, the real benefits of cloud adoption come from orchestrating infrastructure through code. However, this is really challenging to achieve: there are literally thousands of tiny things and configs to consider, and they all seem to take forever. Our experience is that it can take teams up to 24 months to achieve a desired infra state in AWS. By using Leverage you could get your AWS Landing Zone in a few weeks, or your entire AWS Well-Architected based cloud solution within 1 to 3 months (depending on your project's complexity needs).

    We've done it before (don't reinvent the wheel)

    Often, development teams have similar and recurring requests such as IAM, networking, security, storage, databases, compute and secret management, etc. binbash Leverage has been proven in dozens of projects to create software-defined (IaC) AWS environments.

    Best practices baked in the code

    Leverage provides an IaC reference architecture for AWS hosted applications infrastructure. This is baked into the code as a combination of the best AWS Well-Architected Framework practices and the experience of having successfully migrated many customers to the AWS cloud.

    On-demand infra deployment

    Leverage provides your DevOps, Cloud, SRE and Development teams with the ability to provision on-demand infrastructure, ensuring that it will meet the rigorous security requirements of modern cloud native best practices. It fully implements the AWS Well-Architected Framework (WAF) and DevOps best practices, including collaboration, version control, CI/CD, continuous testing, cloud infrastructure and loosely coupled architectures.

    Easier to support and maintain

    Leverage's IaC approach significantly reduces your AWS infra deployment, config and support burden and reduces risk. Our code-backed provisioning has been rigorously tested many times, eliminating the possibility of manual errors. Because the entire infrastructure is deployed from the same proven code, the consistency of your cloud environments will simplify your setup and maintenance. Use the versioned code to iterate and improve, extend or compose your internal processes as your cloud operating model evolves.

    There is no vendor lock-in. You own the solution

    With Leverage you own 100% of the code with no lock-in clauses. If you choose to leave Leverage, you will still have your entire AWS cloud infrastructure that you can access and manage, along with all of its cloud native infrastructure code (Terraform, Helm, Ansible, Python). It\u2019s 100% Open Source on GitHub and is free to use with no strings attached under the MIT license (no licensing fees), and you are free to use, distribute and modify it commercially and privately.

    Consistent environments (Dev/prod parity)

    Keep development, staging, and production cloud envs in parity. Infrastructure as code allows us to define and provision all infrastructure components (think networks, load balancers, databases, security, compute and storage, etc.) using code. Leverage uses Terraform as the IaC language to deploy and set up all the AWS, Kubernetes and HashiCorp Vault resources (it has support for multiple cloud and technology providers). Backed by code, your cloud environments are built exactly the same way every time. Finally, this will result in no differences between development, staging and production.

    Development in production like envs

    IaC allows your development team to deploy and test the AWS infrastructure as if it were application code. Your development is always done in production-like environments. Provision your cloud test and sandbox environments on demand and tear them down when all your testing is complete. Leverage takes all the pain out of maintaining production-like environments, with stable infra releases. It eliminates the unpredictability of wondering if what actually worked in your development envs will work in production.

    "},{"location":"concepts/why-leverage/","title":"Why Leverage?","text":"

    By implementing our Reference Architecture for AWS and the Infrastructure as Code (IaC) Library via Leverage CLI, you will get your entire Cloud Native Application Infrastructure deployed in only a few weeks.

    Did you know?

    You can roll out Leverage by yourself or we can implement it for you!

    "},{"location":"concepts/why-leverage/#the-problem-and-our-solution","title":"The problem and our solution","text":""},{"location":"concepts/why-leverage/#what-are-the-problems-you-might-be-facing","title":"What are the problems you might be facing?","text":"Figure: Why Leverage? The problem. (Source: binbash, \"Leverage Presentation: Why you should use Leverage?\", accessed June 15th 2021)."},{"location":"concepts/why-leverage/#what-is-our-solution","title":"What is our solution?","text":"Figure: Why Leverage? The solution. (Source: binbash, \"Leverage Presentation: Why you should use Leverage?\", accessed June 15th 2021)."},{"location":"es/bienvenido/","title":"Bienvenido","text":""},{"location":"es/bienvenido/#proximamente","title":"Pr\u00f3ximamente","text":""},{"location":"how-it-works/ref-architecture/","title":"How it works","text":""},{"location":"how-it-works/ref-architecture/#how-it-works","title":"How it works","text":"

    The objective of this document is to explain how the binbash Leverage Reference Architecture for AWS works, in particular how the Reference Architecture model is built and why we need it.

    "},{"location":"how-it-works/ref-architecture/#overview","title":"Overview","text":"

    This documentation contains all the guidelines to create the binbash Leverage Reference Architecture for AWS that will be implemented on the project\u2019s AWS infrastructure.

    We're assuming you already have your AWS Landing Zone in place, based on the First Steps guide.

    Our Purpose

    Info

    This documentation will provide a detailed reference of the tools and techs used, the needs they address and how they fit with the multiple practices we will be implementing.

    "},{"location":"how-it-works/ref-architecture/considerations/","title":"Considerations","text":""},{"location":"how-it-works/ref-architecture/considerations/#important-considerations","title":"Important Considerations","text":"

    Assumptions

    Info

    We will explore the details of all the relevant Client application stacks, CI/CD processes, monitoring, security, target service level objective (SLO) and others in a separate document.

    "},{"location":"try-leverage/","title":"Index","text":""},{"location":"try-leverage/#try-leverage","title":"Try Leverage","text":""},{"location":"try-leverage/#before-you-begin","title":"Before you begin","text":"

    The objective of this guide is to introduce you to our binbash Leverage Reference Architecture for AWS workflow through the complete deployment of a basic landing zone configuration.

    The Leverage Landing Zone is the smallest possible fully functional configuration. It lays out the base infrastructure required to manage the environment: billing and financial management, user management, security enforcement, and shared services and resources, always following the best practices laid out by the AWS Well-Architected Framework to ensure quality and to provide a solid base to build upon. This is the starting point from which any Leverage user can and will develop all the features and capabilities they may require to satisfy their specific needs.

    Figure: Leverage Landing Zone architecture components diagram."},{"location":"try-leverage/#about-this-guide","title":"About this guide","text":"

    In this guide you will learn how to:

    Upon completion of this guide you will gain an understanding of the structure of a project as well as familiarity with the tooling used to manage it.

    To begin your journey into creating your first Leverage project, continue to the next section of the guide where you will start by setting up your AWS account.

    "},{"location":"try-leverage/add-aws-accounts/","title":"Add more AWS Accounts","text":""},{"location":"try-leverage/add-aws-accounts/#brief","title":"Brief","text":"

    You can add new AWS accounts to your Leverage project by following the steps in this page.

    Important

    In the examples below, we will be using apps-prd as the account we will be adding and it will be created in the us-east-1 region.

    "},{"location":"try-leverage/add-aws-accounts/#create-the-new-account-in-your-aws-organization","title":"Create the new account in your AWS Organization","text":"
    1. Go to management/global/organizations.
    2. Edit the locals.tf file to add the account to the local accounts variable.
          accounts = {\n...\n...\napps-prd = {\nemail     = \"aws+apps-prd@yourcompany.com\",\nparent_ou = \"apps\"\n}\n}\n
      Note that the apps organizational unit (OU) is being used as the parent OU of the new account. If you need to use a new OU you can add it to the organizational_units variable in the same file.
    3. Run the Terraform workflow to apply the new changes. Typically that would be this:
      leverage terraform init\nleverage terraform apply\n

    Authentication error

    Note this layer was first applied using the bootstrap user, before SSO was set up. Now that we are working with SSO, credentials have changed. So, if this is the first account you add, you'll probably get this error when applying: \"Error: error configuring S3 Backend: no valid credential sources for S3 Backend found.\" In this case, running leverage tf init -reconfigure will fix the issue.

    4. Add the new account to the <project>/config/common.tfvars file. The new account ID should have been displayed in the output of the previous step, e.g.:

      aws_organizations_account.accounts[\"apps-prd\"]: Creation complete after 14s [id=999999999999]\n
      Note the id, 999999999999.

      ...so please grab it from there and use it to update the file as shown below:

      accounts = {\n\n[...]\n\napps-prd = {\nemail = \"<aws+apps-prd@yourcompany.com>\",\n        id    = \"<add-the-account-id-here>\"\n}\n}\n
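      If the ID already scrolled out of your terminal, a hedged alternative (assuming you have an AWS CLI profile able to call organizations:ListAccounts on the management account; the profile placeholder below is hypothetical, adjust it to your setup) is to query the Organization directly:
      # Look up the new account's ID by (partial) name in the AWS Organization\naws organizations list-accounts --profile <your-management-profile> --query \"Accounts[?contains(Name, 'apps-prd')].Id\" --output text\n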
      5. Since you are using SSO in this project, permissions on the new account must be granted before we can move forward. Add the right permissions to the management/global/sso/account_assignments.tf file. For the example:
       # -------------------------------------------------------------------------\n# apps-prd account\n# -------------------------------------------------------------------------\n{\naccount             = var.accounts.apps-prd.id,\npermission_set_arn  = module.permission_sets.permission_sets[\"Administrator\"].arn,\npermission_set_name = \"Administrator\",\nprincipal_type      = local.principal_type_group\nprincipal_name      = local.groups[\"administrators\"].name\n},\n{\naccount             = var.accounts.apps-prd.id,\npermission_set_arn  = module.permission_sets.permission_sets[\"DevOps\"].arn,\npermission_set_name = \"DevOps\",\nprincipal_type      = local.principal_type_group\nprincipal_name      = local.groups[\"devops\"].name\n},\n
      Note your needs can vary; these permissions are just an example, so please be careful with what you are granting here.

      Apply these changes:

      leverage terraform apply\n
      And you must update your AWS config file accordingly by running this:
      leverage aws configure sso\n

    Good! Now you are ready to create the initial directory structure for the new account. The next section will guide you through those steps.

    "},{"location":"try-leverage/add-aws-accounts/#create-and-deploy-the-layers-for-the-new-account","title":"Create and deploy the layers for the new account","text":"

    In this example we will create the apps-prd account structure by using the shared account as a template.

    "},{"location":"try-leverage/add-aws-accounts/#create-the-initial-directory-structure-for-the-new-account","title":"Create the initial directory structure for the new account","text":"
    1. Ensure you are at the root of this repository
    2. Now create the directory structure for the new account:
      mkdir -p apps-prd/{global,us-east-1}\n
    3. Set up the config files:
      1. Create the config files for this account:
        cp -r shared/config apps-prd/config\n
      2. Open apps-prd/config/backend.tfvars and replace any occurrences of shared with apps-prd.
      3. Do the same with apps-prd/config/account.tfvars (a one-liner covering both files is sketched below).
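      If you prefer doing the replacement from the command line, a minimal sketch (assuming GNU sed and the paths shown above) is:
      # Replace every occurrence of shared with apps-prd in both config files\nsed -i 's/shared/apps-prd/g' apps-prd/config/backend.tfvars apps-prd/config/account.tfvars\n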
    "},{"location":"try-leverage/add-aws-accounts/#create-the-terraform-backend-layer","title":"Create the Terraform Backend layer","text":"
    1. Copy the layer from an existing one:

      cp -r shared/us-east-1/base-tf-backend apps-prd/us-east-1/base-tf-backend\n

      Info

      If the source layer was already initialized you should delete the previous Terraform setup using sudo rm -rf .terraform* in the target layer's directory, e.g. rm -rf apps-prd/us-east-1/base-tf-backend/.terraform*

    2. Go to the apps-prd/us-east-1/base-tf-backend directory, open the config.tf file and comment the S3 backend block. E.g.:

      #backend \"s3\" {\n#    key = \"shared/tf-backend/terraform.tfstate\"\n#}\n
      We need to do this for the first apply of this layer.

    3. Now run the Terraform workflow to initialize and apply this layer. The flag --skip-validation is needed here since the bucket does not yet exist.

      leverage terraform init --skip-validation\nleverage terraform apply\n

    4. Open the config.tf file again, uncomment the block you commented before, and replace shared with apps-prd. E.g.:
      backend \"s3\" {\nkey = \"apps-prd/tf-backend/terraform.tfstate\"\n}\n
    5. To finish with the backend layer, re-init to move the tfstate to the new location. Run:
      leverage terraform init\n
      Terraform will detect that you are trying to move from a local to a remote state and will ask for confirmation.
      Initializing the backend...\nAcquiring state lock. This may take a few moments...\nDo you want to copy existing state to the new backend?\n    Pre-existing state was found while migrating the previous \"local\" backend to the\n    newly configured \"s3\" backend. No existing state was found in the newly\n    configured \"s3\" backend. Do you want to copy this state to the new \"s3\"\nbackend? Enter \"yes\" to copy and \"no\" to start with an empty state.\n\n    Enter a value: 
      Enter yes and hit enter.
    "},{"location":"try-leverage/add-aws-accounts/#create-the-security-base-layer","title":"Create the security-base layer","text":"
    1. Copy the layer from an existing one: From the repository root run:

      cp -r shared/us-east-1/security-base apps-prd/us-east-1/security-base\n

      Info

      If the source layer was already initialized you should delete the previous Terraform setup using sudo rm -rf .terraform* in the target layer's directory, e.g. rm -rf apps-prd/us-east-1/security-base/.terraform*

      Go to the apps-prd/us-east-1/security-base directory and open the config.tf file, replacing any occurrences of shared with apps-prd. E.g. this line should be:

      backend \"s3\" {\nkey = \"apps-prd/security-base/terraform.tfstate\"\n}\n

    3. Init and apply the layer

      leverage tf init\nleverage tf apply\n
    "},{"location":"try-leverage/add-aws-accounts/#create-the-network-layer","title":"Create the network layer","text":"
    1. Copy the layer from an existing one: From the root of the repository run this:

      cp -r shared/us-east-1/base-network apps-prd/us-east-1/base-network\n

      Info

      If the source layer was already initialized you should delete the previous Terraform setup using sudo rm -rf .terraform* in the target layer's directory, e.g. rm -rf apps-prd/us-east-1/base-network/.terraform*

    2. Go to the apps-prd/us-east-1/base-network directory and open the config.tf file replacing any occurrences of shared with apps-prd. E.g. this line should be:

      backend \"s3\" {\nkey = \"apps-prd/network/terraform.tfstate\"\n}\n

    3. Open the file locals.tf and set the new account's CIDRs.

      vpc_cidr_block = \"172.19.0.0/20\"\nazs = [\n\"${var.region}a\",\n\"${var.region}b\",\n#\"${var.region}c\",\n#\"${var.region}d\",\n]\n\nprivate_subnets_cidr = [\"172.19.0.0/21\"]\nprivate_subnets = [\n\"172.19.0.0/23\",\n\"172.19.2.0/23\",\n#\"172.19.4.0/23\",\n#\"172.19.6.0/23\",\n]\n\npublic_subnets_cidr = [\"172.19.8.0/21\"]\npublic_subnets = [\n\"172.19.8.0/23\",\n\"172.19.10.0/23\",\n#\"172.19.12.0/23\",\n#\"172.19.14.0/23\",\n]\n
      Note that only two AZs are enabled here; if needed, uncomment the other ones in the three structures.

      Do not overlap CIDRs!

      Be careful when choosing CIDRs. Avoid overlapping CIDRs between accounts. If you need a reference on how to choose the right CIDRs, please see here.

      Calculate CIDRs

      To calculate CIDRs you can check this playbook.
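      As a quick sanity check (an illustrative sketch assuming python3 and its standard ipaddress module are available locally), you can list the /23 subnets contained in the example VPC CIDR and compare them with the values in locals.tf:
      # Print every /23 inside 172.19.0.0/20 (the example CIDR above)\npython3 -c 'import ipaddress; [print(s) for s in ipaddress.ip_network(\"172.19.0.0/20\").subnets(new_prefix=23)]'\n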

    4. Init and apply the layer

      leverage tf init\nleverage tf apply\n

      Create the VPC Peering between the new account and the VPC of the Shared account. Edit the file shared/us-east-1/base-network/config.tf and add a provider and a remote state data source for the created account.

      provider \"aws\" {\nalias                   = \"apps-prd\"\nregion                  = var.region\nprofile                 = \"${var.project}-apps-prd-devops\"\n}\n\ndata \"terraform_remote_state\" \"apps-prd-vpcs\" {\nfor_each = {\nfor k, v in local.apps-prd-vpcs :\nk => v if !v[\"tgw\"]\n}\n\nbackend = \"s3\"\n\nconfig = {\nregion  = lookup(each.value, \"region\")\nprofile = lookup(each.value, \"profile\")\nbucket  = lookup(each.value, \"bucket\")\nkey     = lookup(each.value, \"key\")\n}\n}\n
      Edit file shared/us-east-1/base-network/locals.tf and under
      #\n# Data source definitions\n#\n
      ...add the related structure:
      #\n# Data source definitions\n#\napps-prd-vpcs = {\napps-prd-base = {\nregion  = var.region\nprofile = \"${var.project}-apps-prd-devops\"\nbucket  = \"${var.project}-apps-prd-terraform-backend\"\nkey     = \"apps-prd/network/terraform.tfstate\"\ntgw     = false\n}\n}\n
      Edit the file shared/us-east-1/base-network/vpc_peerings.tf (if this is your first added account the file won't exist, please create it) and add the peering definition:
      #\n# VPC Peering: AppsPrd VPC => Shared VPC\n#\nmodule \"vpc_peering_apps_prd_to_shared\" {\nsource = \"github.com/binbashar/terraform-aws-vpc-peering.git?ref=v6.0.0\"\n\nfor_each = {\nfor k, v in local.apps-prd-vpcs :\nk => v if !v[\"tgw\"]\n}\n\nproviders = {\naws.this = aws\naws.peer = aws.apps-prd\n}\n\nthis_vpc_id = module.vpc.vpc_id\npeer_vpc_id = data.terraform_remote_state.apps-prd-vpcs[each.key].outputs.vpc_id\n\nthis_rts_ids = concat(module.vpc.private_route_table_ids, module.vpc.public_route_table_ids)\npeer_rts_ids = concat(\ndata.terraform_remote_state.apps-prd-vpcs[each.key].outputs.public_route_table_ids,\ndata.terraform_remote_state.apps-prd-vpcs[each.key].outputs.private_route_table_ids\n)\n\nauto_accept_peering = true\n\ntags = merge(local.tags, {\n\"Name\"             = \"${each.key}-to-shared\",\n\"PeeringRequester\" = each.key,\n\"PeeringAccepter\"  = \"shared\"\n})\n}\n
      Apply the changes (be sure to cd into the shared/us-east-1/base-network layer to do this):
      leverage terraform init\nleverage terraform apply\n

    "},{"location":"try-leverage/add-aws-accounts/#done","title":"Done!","text":"

    That should be it. At this point you should have the following:

    1. A brand new AWS account in your AWS organization.
    2. Working configuration files for both existing layers and any new layer you add in the future.
    3. A remote Terraform State Backend for this new account.
    4. Roles and policies (SSO) that are necessary to access the new account.
    5. The base networking resources ready to host your compute services.
    6. The VPC peerings between the new account and shared
    "},{"location":"try-leverage/add-aws-accounts/#next-steps","title":"Next steps","text":"

    Now you have a new account created, so what else?

    To keep creating infra on top of this binbash Leverage Landing Zone with this new account added, please check:

    "},{"location":"try-leverage/aws-account-setup/","title":"Creating your AWS Management account","text":""},{"location":"try-leverage/aws-account-setup/#create-the-first-aws-account","title":"Create the first AWS account","text":"

    First and foremost you'll need to create an AWS account for your project.

    Attention

    Note this will be your management account and has to be called <project-name>-management.

    E.g. if your project is called binbash then your account should be binbash-management.

    Follow the instructions here.

    This will be the management account for your AWS Organization and the email address you use for signing up will be the root user of this account -- you can see this user represented in the architecture diagram.

    Since the root user is the main access point to your account it is strongly recommended that you keep its credentials (email, password) safe by following AWS best practices.

    Tip

    To protect your management account, enabling Multi Factor Authentication is highly encouraged. Also, reviewing the account's billing setup is always a good idea before proceeding.

    For more details on setting up your AWS account: Organization account setup guide

    "},{"location":"try-leverage/aws-account-setup/#create-a-bootstrap-user-with-temporary-administrator-permissions","title":"Create a bootstrap user with temporary administrator permissions","text":"

    Leverage needs a user with temporary administrator permissions in order to deploy the initial resources that will form the foundations you will then use to keep building on. That initial deployment is called the bootstrap process and thus the user required for that is called \"the bootstrap user\".

    To create that user, navigate to the IAM page and create a user named mgmt-org-admin following steps 2 and 3 of this leverage doc.

    Info

    Bear in mind that the page for creating users may change from time to time but the key settings for configuring the bootstrap user are the following:

    Usually the last step of the user creation should present you the following information:

    Make a note of all of these and keep them in a safe place as you will need them in the following steps.

    Info

    If you are only getting the bootstrap user credentials for someone else in your team or in Binbash's team, then please share them in a secure way (e.g. a password management service, GPG keys, etc.).

    Info

    If the user was set up with the option \"Force to change password on first login\", you should log into the console to do so.

    "},{"location":"try-leverage/aws-account-setup/#next-steps","title":"Next steps","text":"

    You have successfully created and configured the AWS account for your Leverage project. From now on, almost all interactions with the AWS environment (with few notable exceptions) will be performed via Leverage.

    Next, you will setup all required dependencies to work on a Leverage project in your local machine.

    "},{"location":"try-leverage/enabling-sso/","title":"Configure SSO settings","text":""},{"location":"try-leverage/enabling-sso/#enable-sso","title":"Enable SSO","text":"

    Let's start by configuring SSO settings. Open this file: <your_project>/config/common.tfvars and update the following lines:

    sso_enabled   = false\nsso_start_url = \"https://bbleverage.awsapps.com/start\"\n

    Change sso_enabled to true as follows to enable SSO support:

    sso_enabled   = true\n

    Now you need to set the sso_start_url with the right URL. To find that, navigate here: https://us-east-1.console.aws.amazon.com/singlesignon/home -- you should be already logged in to the Management account for this to work. You should see a \"Settings summary\" panel on the right of the screen that shows the \"AWS access portal URL\". Copy that and use it to replace the value in the sso_start_url entry. Below is an example just for reference:

    sso_start_url = \"https://d-xyz01234567.awsapps.com/start\"\n

    Customize the AWS access portal URL

    The 'AWS access portal URL' can be customized to use a more friendly name. Check the official documentation for that.

    Further info on configuring SSO

    There is more information on how to configure SSO here.

    "},{"location":"try-leverage/enabling-sso/#update-backend-profiles-in-the-management-account","title":"Update backend profiles in the management account","text":"

    It's time to set the right profile names in the backend configuration files. Open this file: management/config/backend.tfvars and change the profile value from this:

    profile = \"me-bootstrap\"\n
    To this:
    profile = \"me-management-oaar\"\n
    Please note that in the examples above the short project name is me, which is used as a prefix and is the part that doesn't get replaced.
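    If you'd rather script that edit, a one-liner along these lines does the same (a sketch assuming GNU sed and the me short name used in this example):
    # Point the management backend at the SSO-based profile instead of the bootstrap one\nsed -i 's/me-bootstrap/me-management-oaar/' management/config/backend.tfvars\n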

    "},{"location":"try-leverage/enabling-sso/#activate-your-sso-user-and-set-up-your-password","title":"Activate your SSO user and set up your password","text":"

    The SSO users you created when you provisioned the SSO layer need to go through an email activation procedure.

    The user is the one you set in the project.yaml file at the beginning, in this snippet:

    users:\n- first_name: the-name\nlast_name: the-last-name\nemail: user@domain.co\ngroups:\n- administrators\n- devops\n

    To activate the user find the instructions here.

    Once SSO users have been activated, they will need to get their initial passwords so they are able to log in. Check out the steps for that here.

    Basically:

    "},{"location":"try-leverage/enabling-sso/#configure-the-cli-for-sso","title":"Configure the CLI for SSO","text":"

    Almost there. Let's try the SSO integration now.

    "},{"location":"try-leverage/enabling-sso/#configure-your-sso-profiles","title":"Configure your SSO profiles","text":"

    Since this is your first time using it, you will need to configure it by running this:

    leverage aws configure sso\n

    Follow the wizard to get your AWS config file created for you. There is more info about that here.

    "},{"location":"try-leverage/enabling-sso/#verify-on-a-layer-in-the-management-account","title":"Verify on a layer in the management account","text":"

    To ensure that worked, let's run a few commands to verify:

    1. We'll use sso for the purpose of this example
    2. Move to the management/global/sso layer
    3. Run: leverage tf plan
    4. You should get this error: \"Error: error configuring S3 Backend: no valid credential sources for S3 Backend found.\"
    5. This happens because so far you have been running Terraform with a different AWS profile (the bootstrap one). Luckily the fix is simple, just run this: leverage tf init -reconfigure. Terraform should reconfigure the AWS profile in the .terraform/terraform.tfstate file.
    6. Now try running that leverage tf plan command again
    7. This time it should succeed, you should see the message: No changes. Your infrastructure matches the configuration.

    Note if you still have the same error, try clearing credentials with:

    leverage aws sso logout && leverage aws sso login\n
    "},{"location":"try-leverage/enabling-sso/#next-steps","title":"Next steps","text":"

    You successfully enabled SSO.

    Next, you will orchestrate the remaining accounts, security and shared.

    "},{"location":"try-leverage/leverage-project-setup/","title":"Create a Leverage project","text":"

    A Leverage project starts with a simple project definition file that you modify to suit your needs. That file is then used to render the initial directory layout which, at the end of this guide, will be your reference architecture. Follow the sections below to begin with that.

    The account's name will be given by your project's name followed by -management, since Leverage uses a suffix naming system to differentiate between the multiple accounts of a project. For this guide we'll stick to calling the project MyExample and so, the account name will be myexample-management.

    Along the same line, we'll use the example.com domain for the email address used to register the account. Adding a -aws suffix to the project's name to indicate that this email address is related to the project's AWS account, we end up with a registration email that looks like myexample-aws@example.com.

    Email addresses for AWS accounts.

    Each AWS account requires having a unique email address associated to it. The Leverage Reference Architecture for AWS makes use of multiple accounts to better manage the infrastructure; as such, you will need a different address for each one. Creating a new email account for each AWS account is not really a viable solution to this problem; a better approach is to take advantage of mail services that support aliases. For information regarding how this works: Email setup for your AWS account.

    "},{"location":"try-leverage/leverage-project-setup/#create-the-project-directory","title":"Create the project directory","text":"

    Each Leverage project lives in its own working directory. Create a directory for your project as follows:

    mkdir myexample\ncd myexample\n

    "},{"location":"try-leverage/leverage-project-setup/#initialize-the-project","title":"Initialize the project","text":"

    Create the project definition file by running the following command:

    $ leverage project init\n[18:53:24.407] INFO     Project template found. Updating.                                                                                              [18:53:25.105] INFO     Finished updating template.                                                                                                    [18:53:25.107] INFO     Initializing git repository in project directory.                                                                              [18:53:25.139] INFO     No project configuration file found. Dropping configuration template project.yaml.                                             [18:53:25.143] INFO     Project initialization finished.\n

    The command above should create the project definition file (project.yaml) and should initialize a git repository in the current working directory. This is important because Leverage projects by design rely on specific git conventions and also because it is assumed that you will want to keep your infrastructure code versioned.

    "},{"location":"try-leverage/leverage-project-setup/#modify-the-project-definition-file","title":"Modify the project definition file","text":"

    Open the project.yaml file and fill in the required information.

    Typically the placeholder values between < and > symbols are the ones you would want to edit; however, you are welcome to adjust any other values to suit your needs.

    For instance, the following is a snippet of the project.yaml file in which the values for project_name and short_name have been set to example and ex respectively:

    project_name: example\nshort_name: ex\nprimary_region: us-east-1\nsecondary_region: us-west-2\n...\n

    The project_name field only accepts lowercase alphanumeric characters and allows hyphens ('-'). For instance, valid names could be 'example', 'leveragedemo' or 'example-demo'.

    The short_name field only accepts 2 to 4 lowercase alpha characters. For instance, valid names could be 'exam', 'leve' or 'ex'.
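    As a quick self-check before saving the file, the following sketch mirrors the two rules above with simple regular expressions (illustrative only, not taken from the Leverage validation code):
    # Each check prints ok only when the value satisfies the stated rule\necho 'example-demo' | grep -Eq '^[a-z0-9-]+$' && echo 'project_name ok'\necho 'ex' | grep -Eq '^[a-z]{2,4}$' && echo 'short_name ok'\n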

    We typically use us-east-1 as the primary region and us-west-2 as the secondary region for the majority of our projects. However, please note that these regions may not be the most fitting choice for your specific use case. For detailed guidance, we recommend following these provided guidelines.

    Another example is below. Note that the management, security, and shared accounts have been updated with slightly different email addresses (actually aws+security@example.com and aws+shared@example.com are email aliases of aws@example.com which is a convenient trick in some cases):

    ...\norganization:\n  accounts:\n  - name: management\n    email: aws@example.com\n  - name: security\n    email: aws+security@example.com\n  - name: shared\n    email: aws+shared@example.com\n...\n

    Finally, here's another example snippet that shows how you can define users and assign them to groups:

    ...\nusers:\n- first_name: Jane\n  last_name: Doe\n  email: jane.doe@example.com\n  groups:\n  - administrators\n  - devops\n- first_name: Foo\n  last_name: Bar\n  email: foo.bar@example.com\n  groups:\n  - devops\n...\n

    Note these users will be the ones used later for SSO access.

    The project definition file includes other entries but the ones shown above are the most frequently updated.

    "},{"location":"try-leverage/leverage-project-setup/#configure-bootstrap-credentials","title":"Configure \"bootstrap\" credentials","text":"

    To be able to interact with your AWS environment you first need to configure the credentials to enable AWS CLI to do so. Provide the keys obtained in the previous account creation step to the command by any of the available means.

    ManuallyFile selectionProvide file in command

    leverage credentials configure --type BOOTSTRAP\n
    [09:37:17.530] INFO     Loading configuration file.\n[09:37:18.477] INFO     Loading project environment configuration file.\n[09:37:20.426] INFO     Configuring bootstrap credentials.\n> Select the means by which you'll provide the programmatic keys: Manually\n> Key: AKIAU1OF18IXH2EXAMPLE\n> Secret: ****************************************\n[09:37:51.638] INFO     Bootstrap credentials configured in: /home/user/.aws/me/credentials\n[09:37:53.497] INFO     Fetching management account id.\n[09:37:53.792] INFO     Updating project configuration file.\n[09:37:55.344] INFO     Skipping assumable roles configuration.\n

    leverage credentials configure --type BOOTSTRAP\n
    [09:37:17.530] INFO     Loading configuration file.\n[09:37:18.477] INFO     Loading project environment configuration file.\n[09:37:20.426] INFO     Configuring bootstrap credentials.\n> Select the means by which you'll provide the programmatic keys: Path to an access keys file obtained from AWS\n> Path to access keys file: ../bootstrap_accessKeys.csv\n[09:37:51.638] INFO     Bootstrap credentials configured in: /home/user/.aws/me/credentials\n[09:37:53.497] INFO     Fetching management account id.\n[09:37:53.792] INFO     Updating project configuration file.\n[09:37:55.344] INFO     Skipping assumable roles configuration.\n

    leverage credentials configure --type BOOTSTRAP --credentials-file ../bootstrap_accessKeys.csv\n
    [09:37:17.530] INFO     Loading configuration file.\n[09:37:18.477] INFO     Loading project environment configuration file.\n[09:37:20.426] INFO     Configuring bootstrap credentials.\n[09:37:51.638] INFO     Bootstrap credentials configured in: /home/user/.aws/me/credentials\n[09:37:53.497] INFO     Fetching management account id.\n[09:37:53.792] INFO     Updating project configuration file.\n[09:37:55.344] INFO     Skipping assumable roles configuration.\n

    More information on credentials configure

    During the credentials setup, the AWS account id is filled in for us in the project configuration file.

    ...\norganization:\naccounts:\n- name: management\nemail: myexample-aws@example.com\nid: '000123456789'\n...\n
    "},{"location":"try-leverage/leverage-project-setup/#create-the-configured-project","title":"Create the configured project","text":"

    Now you will finally create all the infrastructure definition in the project.

    leverage project create\n
    [09:40:54.934] INFO     Loading configuration file.\n[09:40:54.950] INFO     Creating project directory structure.\n[09:40:54.957] INFO     Finished creating directory structure.\n[09:40:54.958] INFO     Setting up common base files.\n[09:40:54.964] INFO     Account: Setting up management.\n[09:40:54.965] INFO             Layer: Setting up config.\n[09:40:54.968] INFO             Layer: Setting up base-tf-backend.\n[09:40:54.969] INFO             Layer: Setting up base-identities.\n[09:40:54.984] INFO             Layer: Setting up organizations.\n[09:40:54.989] INFO             Layer: Setting up security-base.\n[09:40:54.990] INFO     Account: Setting up security.\n[09:40:54.991] INFO             Layer: Setting up config.\n[09:40:54.994] INFO             Layer: Setting up base-tf-backend.\n[09:40:54.995] INFO             Layer: Setting up base-identities.\n[09:40:55.001] INFO             Layer: Setting up security-base.\n[09:40:55.002] INFO     Account: Setting up shared.\n[09:40:55.003] INFO             Layer: Setting up config.\n[09:40:55.006] INFO             Layer: Setting up base-tf-backend.\n[09:40:55.007] INFO             Layer: Setting up base-identities.\n[09:40:55.008] INFO             Layer: Setting up security-base.\n[09:40:55.009] INFO             Layer: Setting up base-network.\n[09:40:55.013] INFO     Project configuration finished.\n               INFO     Reformatting terraform configuration to the standard style.\n[09:40:55.743] INFO     Finished setting up project.\n

    More information on project create

    In this step, the directory structure for the project and all definition files are created using the information from the project.yaml file and checked for correct formatting.

    You will end up with something that looks like this:

    MyExample project file structure

    \ud83d\udcc2 myexample\n\u251c\u2500\u2500 \ud83d\udcc4 build.env\n\u251c\u2500\u2500 \ud83d\udcc4 project.yaml\n\u251c\u2500\u2500 \ud83d\udcc2 config\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 common.tfvars\n\u251c\u2500\u2500 \ud83d\udcc2 management\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 config\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tfvars\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 backend.tfvars\n|   \u251c\u2500\u2500 \ud83d\udcc2 global\n|   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 organizations\n|   \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 accounts.tf\n|   \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 config.tf\n|   \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 delegated_administrator.tf\n|   \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 locals.tf\n|   \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 organizational_units.tf\n|   \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 organization.tf\n|   \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 policies_scp.tf\n|   \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 policy_scp_attachments.tf\n|   \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 service_linked_roles.tf\n|   \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 variables.tf\n|   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 base-identities\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 config.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 groups.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 keys\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 locals.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 outputs.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 roles.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 users.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 variables.tf\n|   \u2514\u2500\u2500 \ud83d\udcc2 us-east-1\n|    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-tf-backend\n|    \u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 config.tf\n|    \u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 locals.tf\n|    \u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 main.tf\n|    \u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 variables.tf\n|    \u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 security-base\n|    \u00a0\u00a0     \u251c\u2500\u2500 \ud83d\udcc4 account.tf\n|    \u00a0\u00a0     \u251c\u2500\u2500 \ud83d\udcc4 config.tf\n|    \u00a0\u00a0     \u2514\u2500\u2500 \ud83d\udcc4 variables.tf\n\u251c\u2500\u2500 \ud83d\udcc2 security\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 config\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tfvars\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 backend.tfvars\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 global\n|   |   \u2514\u2500\u2500 \ud83d\udcc2 base-identities\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 
\ud83d\udcc4 config.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 groups_policies.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 groups.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 keys\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 locals.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 outputs.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 role_policies.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 roles.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 users.tf\n|   \u2502\u00a0\u00a0  \u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 variables.tf\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 us-east-1\n|    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-tf-backend\n|    \u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 config.tf\n|    \u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 locals.tf\n|    \u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 main.tf\n|    \u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 variables.tf\n|    \u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 security-base\n|    \u00a0\u00a0     \u251c\u2500\u2500 \ud83d\udcc4 account.tf\n|    \u00a0\u00a0     \u251c\u2500\u2500 \ud83d\udcc4 config.tf\n|    \u00a0\u00a0     \u251c\u2500\u2500 \ud83d\udcc4 iam_access_analyzer.tf\n|    \u00a0\u00a0     \u251c\u2500\u2500 \ud83d\udcc4 locals.tf\n\u2502    \u00a0\u00a0     \u2514\u2500\u2500 \ud83d\udcc4 variables.tf\n\u2514\u2500\u2500 \ud83d\udcc2 shared\n    \u251c\u2500\u2500 \ud83d\udcc2 config\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tfvars\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 backend.tfvars\n    \u251c\u2500\u2500 \ud83d\udcc2 global\n    |   \u2514\u2500\u2500 \ud83d\udcc2 base-identities\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tf\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 config.tf\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 locals.tf\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 policies.tf\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 roles.tf\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 service_linked_roles.tf\n    |    \u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 variables.tf\n    \u2514\u2500\u2500 \ud83d\udcc2 us-east-1\n        \u251c\u2500\u2500 \ud83d\udcc2 base-network\n        \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tf\n        \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 config.tf\n        \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 locals.tf\n        \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 network.tf\n        \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 network_vpc_flow_logs.tf\n        \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 outputs.tf\n        \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 variables.tf\n        \u251c\u2500\u2500 \ud83d\udcc2 base-tf-backend\n        \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 config.tf\n        \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 locals.tf\n        \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 main.tf\n        \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 variables.tf\n        \u2514\u2500\u2500 \ud83d\udcc2 security-base\n            \u251c\u2500\u2500 \ud83d\udcc4 account.tf\n            \u251c\u2500\u2500 \ud83d\udcc4 config.tf\n            
\u2514\u2500\u2500 \ud83d\udcc4 variables.tf\n\n

    As you can see, the structure is made up of a directory for each account, each containing all the definitions for that account's respective layers.


    The layers themselves are also grouped based on the region in which they are deployed. The regions are configured through the project.yaml file. In the case of the Leverage landing zone, most layers are deployed in the primary region, so you can see the definition of these layers in a us-east-1 directory, as per the example configuration.


    Some layers are not bound to a region because their definition is mainly composed of resources for services that are global in nature, like IAM or Organizations. These kinds of layers are kept in a global directory.

    "},{"location":"try-leverage/leverage-project-setup/#next-steps","title":"Next steps","text":"

    You have now created the definition of all the infrastructure for your project and configured the credentials needed to deploy such infrastructure in the AWS environment.


    Next, you will orchestrate the first and main account of the project, the management account.

    "},{"location":"try-leverage/local-setup/","title":"Install Leverage CLI","text":"

    Leverage-based projects are better managed via the Leverage CLI, a companion tool that simplifies your daily interactions with Leverage. This page will guide you through the installation steps.

    "},{"location":"try-leverage/local-setup/#prerequisites","title":"Prerequisites","text":"

    In order to install the CLI you should have the following installed on your system:

    "},{"location":"try-leverage/local-setup/#install-leverage-cli_1","title":"Install Leverage CLI","text":"

    Leverage CLI is distributed as a Python package that you can install via pip as follows:

    pip install leverage\n

    For further details on installing Leverage CLI: Install Leverage CLI

    "},{"location":"try-leverage/local-setup/#verify-your-leverage-cli-installation","title":"Verify your Leverage CLI installation","text":"

    Verify that your Leverage CLI installation was successful by running the following command:

    $ leverage --version\nleverage, version 1.9.2\n

    It is generally recommended to install the latest stable version of the CLI.
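    If you already have the CLI installed, you can upgrade it to the latest version with pip, for example:

    pip install --upgrade leverage\n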

    "},{"location":"try-leverage/local-setup/#enable-tab-completion","title":"Enable tab completion","text":"

    If you use Bash, Zsh or Fish, you can enable shell completion for Leverage commands.


    Add to ~/.bashrc:

    eval \"$(_LEVERAGE_COMPLETE=bash_source leverage)\"\n

    Add to ~/.zshrc:

    eval \"$(_LEVERAGE_COMPLETE=zsh_source leverage)\"\n

    Add to ~/.config/fish/completions/leverage.fish:

    eval (env _LEVERAGE_COMPLETE=fish_source leverage)\n

    Now you need to restart your shell.
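    For example, with Bash or Zsh you can reload the current shell in place (opening a new terminal also works):

    exec \"$SHELL\"\n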

    "},{"location":"try-leverage/local-setup/#next-steps","title":"Next steps","text":"

    Now you have your system completely configured to work on a Leverage project.

    Next, you will setup and create your Leverage project.

    "},{"location":"try-leverage/management-account/","title":"Configure the Management account","text":"

    Finally we reach the point at which you'll actually create the infrastructure in your AWS environment.

    Some accounts and layers rely on other accounts or layers to be deployed first, which creates dependencies between them and establishes an order in which all layers should be deployed. We will go through these dependencies in order.

    The management account is used to configure and access all the accounts in the AWS Organization. Consolidated Billing and Cost Management are also enforced through this account.

    Costs associated with this solution

    By default, this AWS Reference Architecture configuration should not incur any costs.

    "},{"location":"try-leverage/management-account/#deploy-the-management-accounts-layers","title":"Deploy the Management account's layers","text":"

    To begin, place yourself in the management account directory.

    cd management\n

    "},{"location":"try-leverage/management-account/#terraform-backend-layer","title":"Terraform backend layer","text":"

    Move into the us-east-1/base-tf-backend directory and run:

    leverage terraform init --skip-validation\nleverage terraform apply\n

    All apply commands will prompt for confirmation; answer yes when this happens.

    More information on terraform init and terraform apply

    Now the infrastructure for Terraform state management is created. The next step is to push the local .tfstate to the bucket. To do this, uncomment the backend section of the Terraform configuration in management/base-tf-backend/config.tf:

      backend \"s3\" {\nkey = \"management/tf-backend/terraform.tfstate\"\n}\n

    And run once more:

    leverage terraform init\n

    When prompted, answer yes. Now you can safely remove the terraform.tfstate and terraform.tfstate.backup files created during the apply step.
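    For example, a minimal cleanup, run from within the base-tf-backend layer directory:

    rm terraform.tfstate terraform.tfstate.backup\n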

    Terraform backend

    More information about the Terraform backend and Terraform state management:

    "},{"location":"try-leverage/management-account/#organizations-layer","title":"Organizations layer","text":"

    Next, in the same fashion as in the previous layer, move into the global/organizations directory and run:

    leverage terraform init\nleverage terraform apply\n

    The AWS account that you created manually is the management account itself, so to prevent Terraform from trying to create it and erroring out, this account definition is commented out by default in the code. Now you need to make the Terraform state aware of the link between the two. To do that, uncomment the management organizations account resource in accounts.tf:

    resource \"aws_organizations_account\" \"management\" {\nname  = \"${var.project_long}-management\"\nemail = local.management_account.email\n}\n

    Grab the management account id that was previously filled in automatically in the project.yaml file:

    ...\norganization:\naccounts:\n- name: management\nemail: myexample-aws@example.com\nid: '000123456789'\n...\n

    And run:

    leverage terraform import aws_organizations_account.management 000123456789\n

    More information on terraform import

    Getting errors with zsh?

    Zsh users may need to prepend noglob to the import command for it to be recognized correctly. As an alternative, square brackets can be escaped as \[\].
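    For instance, a sketch of the noglob variant, reusing the sample account id from above:

    noglob leverage terraform import aws_organizations_account.management 000123456789\n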

    "},{"location":"try-leverage/management-account/#security-layer","title":"Security layer","text":"

    Change directory to us-east-1/security-base and run this:

    leverage terraform init\nleverage terraform apply\n

    "},{"location":"try-leverage/management-account/#update-the-bootstrap-credentials","title":"Update the bootstrap credentials","text":"

    Now that the management account has been deployed, and more specifically, all Organizations accounts have been created (in the organizations layer), you need to update the credentials for the bootstrap process before proceeding to deploy any of the remaining accounts.

    This will fetch the organizations structure from the AWS environment and create individual profiles associated with each account for the AWS CLI to use. So, run:

    $ leverage credentials configure --type BOOTSTRAP --skip-access-keys-setup\n[09:08:44.762] INFO     Loading configuration file.\n[09:08:44.785]     Loading project environment configuration file.\n[09:08:44.791]     Loading Terraform common configuration.\n[09:08:53.247]     Configuring assumable roles.\n[09:08:53.248]     Fetching organization accounts.\n[09:08:55.193]     Backing up account profiles file.\n[09:08:55.761]             Configuring profile me-management-oaar\n[09:08:59.977]             Configuring profile me-security-oaar\n[09:09:04.081]             Configuring profile me-shared-oaar\n[09:09:08.305]     Account profiles configured in: /home/user/.aws/me/config\n[09:09:08.307] INFO     Updating project's Terraform common configuration.\n

    More information on credentials configure

    "},{"location":"try-leverage/management-account/#sso-layer","title":"SSO layer","text":"

    Before working on the SSO layer you have to navigate to the AWS IAM Identity Center page, set the region to the primary region you've chosen and enable Single Sign-On (SSO) by clicking on the Enable button.

    Now back to the terminal. The SSO layer is deployed in two steps. First, switch to the global/sso directory and run the following:

    leverage terraform init\nleverage terraform apply\n

    Secondly, open the account_assignments.tf file and uncomment the entire section that starts with this line:

    # module \"account_assignments\" {\n#   source = \"github.com/binbashar/terraform-aws-sso.git//modules/account-assignments?ref=0.7.1\"\n\n[REDACTED]\n\n#   ]\n# }\n

    After that, run these commands:

    leverage terraform init\nleverage terraform apply\n

    "},{"location":"try-leverage/management-account/#next-steps","title":"Next steps","text":"

    You have successfully orchestrated the management account for your project and configured the credentials for the next steps.

    Now, let's enable SSO for the rest of the process.

    "},{"location":"try-leverage/post-deployment/","title":"Post-deployment steps","text":"

    At this point the landing zone should be ready.

    The bootstrap user can now be deleted.
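    As an illustration, a minimal sketch of doing this with the AWS CLI; the user name and profile below are placeholders (use the ones from your bootstrap setup), the access key id is the sample one used earlier, and any remaining group memberships or attached policies would also have to be removed before the user itself:

    aws iam list-access-keys --user-name bootstrap-user --profile me-bootstrap\naws iam delete-access-key --user-name bootstrap-user --access-key-id AKIAU1OF18IXH2EXAMPLE --profile me-bootstrap\naws iam delete-user --user-name bootstrap-user --profile me-bootstrap\n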

    "},{"location":"try-leverage/post-deployment/#delete-the-bootstrap-user","title":"Delete the bootstrap user","text":""},{"location":"try-leverage/post-deployment/#adding-sso-users-and-groups","title":"Adding SSO users and groups","text":"

    To add users or groups, please see the SSO Managing Users document.

    "},{"location":"try-leverage/post-deployment/#next-steps","title":"Next steps","text":"

    Now you not only have a fully functional landing zone configuration deployed, but also are able to interact with it using your own AWS SSO credentials.

    For more detailed information on the binbash Leverage Landing Zone, visit the links below.

    "},{"location":"try-leverage/security-and-shared-accounts/","title":"Configure the Security and Shared accounts","text":"

    You should by now be more familiar with the steps required to create and configure the Management account. Now you need to do pretty much the same with two more accounts: Security and Shared. Follow the sections in this page to get started!

    What are these accounts used for?

    The Security account is intended for operating security services (e.g. GuardDuty, AWS Security Hub, AWS Audit Manager, Amazon Detective, Amazon Inspector, and AWS Config), monitoring AWS accounts, and automating security alerting and response.

    The Shared Services account supports the services that multiple applications and teams use to deliver their outcomes. Some examples include VPN servers, monitoring systems, and centralized logs management services.

    "},{"location":"try-leverage/security-and-shared-accounts/#deploy-the-security-accounts-layers","title":"Deploy the Security account's layers","text":"

    The next account to orchestrate is the security account.

    This account is intended for centralized user management via an IAM roles-based, cross-organization authentication approach. This means that most of the users for your organization will be defined in this account and those users will access the different accounts through this one.

    First, go to the security directory.

    cd security\n

    "},{"location":"try-leverage/security-and-shared-accounts/#set-profile","title":"Set profile","text":"

    Since we are using SSO, check that in the security/config/backend.tfvars file the profile is set to:

    profile = \"me-security-devops\"\n

    If it is not, please modify it. Note we are using the sample short project name me; use the one you have set.

    "},{"location":"try-leverage/security-and-shared-accounts/#terraform-backend-layer","title":"Terraform backend layer","text":"

    Move into the us-east-1/base-tf-backend directory and run:

    leverage terraform init --skip-validation\nleverage terraform apply\n

    More information on terraform init and terraform apply

    Now, to push the local .tfstate to the bucket, uncomment the backend section for the terraform configuration in security/base-tf-backend/config.tf

      backend \"s3\" {\nkey = \"security/tf-backend/terraform.tfstate\"\n}\n

    And run again:

    leverage terraform init\n

    When prompted, answer yes.

    Now you can safely remove the terraform.tfstate and terraform.tfstate.backup files created during the apply step.

    "},{"location":"try-leverage/security-and-shared-accounts/#security-layer","title":"Security layer","text":"

    The last layer for the security account is the security layer. Move into the us-east-1/security-base directory and run:

    leverage terraform init\nleverage terraform apply\n

    "},{"location":"try-leverage/security-and-shared-accounts/#deploy-the-shared-accounts-layers","title":"Deploy the Shared account's layers","text":"

    The last account in this deployment is the shared account.

    Again, this account is intended for managing the infrastructure of shared services and resources such as directory services, DNS, VPN, monitoring tools or centralized logging solutions.

    Place yourself in the shared directory.

    cd shared\n

    "},{"location":"try-leverage/security-and-shared-accounts/#set-profile_1","title":"Set profile","text":"

    Since we are using SSO, check that in the shared/config/backend.tfvars file the profile is set to:

    profile = \"me-shared-devops\"\n

    If it is not, please modify it. Note we are using the sample short project name me; use the one you have set.

    "},{"location":"try-leverage/security-and-shared-accounts/#terraform-backend-layer_1","title":"Terraform backend layer","text":"

    Move into the us-east-1/base-tf-backend directory and run:

    leverage terraform init --skip-validation\nleverage terraform apply\n

    More information on terraform init and terraform apply

    Now, to push the local .tfstate to the bucket, uncomment the backend section for the terraform configuration in shared/base-tf-backend/config.tf

      backend \"s3\" {\nkey = \"shared/tf-backend/terraform.tfstate\"\n}\n

    And run a second time:

    leverage terraform init\n

    When prompted, answer yes.

    Now you can safely remove the terraform.tfstate and terraform.tfstate.backup files created during the apply step.

    "},{"location":"try-leverage/security-and-shared-accounts/#security-layer_1","title":"Security layer","text":"

    Next, move into the us-east-1/security-base directory and run:

    leverage terraform init\nleverage terraform apply\n

    "},{"location":"try-leverage/security-and-shared-accounts/#network-layer","title":"Network layer","text":"

    The last layer is the network layer, so switch to the us-east-1/base-network directory and run:

    leverage terraform init\nleverage terraform apply\n

    "},{"location":"try-leverage/security-and-shared-accounts/#next-steps","title":"Next steps","text":"

    You now have a fully deployed landing zone configuration for the Leverage Reference Architecture for AWS, with its three accounts, management, security and shared, ready to be used.

    Next, you are going to tackle the last steps.

    "},{"location":"user-guide/","title":"Index","text":""},{"location":"user-guide/#user-guide","title":"User Guide","text":""},{"location":"user-guide/#overview","title":"Overview","text":"

    The pages in this section explore, in great detail, the architecture of the components that make up Leverage.

    But don't feel constrained to the links above; feel free to use the left menu to explore more on your own.

    "},{"location":"user-guide/cookbooks/","title":"Cookbooks","text":""},{"location":"user-guide/cookbooks/VPC-subnet-calculator/","title":"How to calculate the VPC subnet CIDRs?","text":"

    To calculate subnets, this calculator can be used.

    Note that in this link a few parameters were added: the base network and mask, and the division number. In this case the example is for the shared account networking.

    This table will be shown:

    Note how this information is set in the tf file:

      vpc_cidr_block = \"172.18.0.0/20\"\nazs = [\n\"${var.region}a\",\n\"${var.region}b\"\n]\n\nprivate_subnets_cidr = [\"172.18.0.0/21\"]\nprivate_subnets = [\n\"172.18.0.0/23\",\n\"172.18.2.0/23\"\n]\n\npublic_subnets_cidr = [\"172.18.8.0/21\"]\npublic_subnets = [\n\"172.18.8.0/23\",\n\"172.18.10.0/23\"\n]\n

    Note the main CIDR is being used for the VPC. See on the left how the /20 encompasses all the rows.

    Then there are two /21 divisions. Note the first subnet address of the first row of each one is used for private_subnets_cidr and public_subnets_cidr.

    Finally, the /23 blocks are used for the individual subnets.

    Note we are using the first two subnet addresses for each /21. This is because we are reserving the other two to allow adding more AZs in the future (up to two more in this case).
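    If you want to double-check these splits from a terminal, a quick sketch (assuming python3 is available) could be:

    python3 -c \"import ipaddress; print([str(s) for s in ipaddress.ip_network('172.18.0.0/20').subnets(new_prefix=21)])\"\npython3 -c \"import ipaddress; print([str(s) for s in ipaddress.ip_network('172.18.0.0/21').subnets(new_prefix=23)])\"\n

    The first command prints the two /21 blocks (172.18.0.0/21 and 172.18.8.0/21), and the second prints the four /23 blocks inside the private /21, of which only the first two are used above.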

    If you want, you can use this page as a reference to select CIDRs for each account.

    "},{"location":"user-guide/cookbooks/VPC-with-no-LandingZone/","title":"VPC with no Landing Zone","text":""},{"location":"user-guide/cookbooks/VPC-with-no-LandingZone/#what","title":"What","text":"

    Do you want to try binbash Leverage but you are not willing to transform yet your already existent infra to the binbash Leverage Landing Zone (honoring the AWS Well Architected Framework)?

    With this cookbook you will create a VPC with all the benefits binbash Leverage network layer provides.

    If you want to use the Full binbash Leverage Landing Zone please visit the Try Leverage section

    This will give you the full power of binbash Leverage and the AWS Well Architected Framework.

    "},{"location":"user-guide/cookbooks/VPC-with-no-LandingZone/#why","title":"Why","text":"

    Maybe because you found binbash Leverage but you want to try it out first before you convert your base infra.

    "},{"location":"user-guide/cookbooks/VPC-with-no-LandingZone/#how","title":"How","text":""},{"location":"user-guide/cookbooks/VPC-with-no-LandingZone/#objective","title":"Objective","text":"

    We will create a simple VPC (with all its resources) and will add an EC2 instance to it so we can test it for real.

    "},{"location":"user-guide/cookbooks/VPC-with-no-LandingZone/#what-you-need","title":"What you need:","text":""},{"location":"user-guide/cookbooks/VPC-with-no-LandingZone/#common-process","title":"Common Process","text":"

    Ok, just install binbash Leverage and create the Leverage project. When creating credentials, do it for MANAGEMENT type. Basically:

    mkdir project && cd project\nleverage project init\n## edit the yaml file\nleverage project create\nleverage credentials configure --type MANAGEMENT\n

    You end up with this structure:

    \u276f tree -L 2 .\n.\n\u251c\u2500\u2500 build.env\n\u251c\u2500\u2500 config\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 common.tfvars\n\u251c\u2500\u2500 management\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 config\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 global\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 us-east-1\n\u251c\u2500\u2500 Pipfile\n\u251c\u2500\u2500 Pipfile.lock\n\u251c\u2500\u2500 project.yaml\n\u251c\u2500\u2500 security\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 config\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 us-east-1\n\u2514\u2500\u2500 shared\n    \u251c\u2500\u2500 config\n    \u2514\u2500\u2500 us-east-1\n

    Create a dummy account dir.

    Although we won't create a real account (since there is no Landing Zone), we'll have to create a dir to hold all the layers we need.

    mkdir -p apps-dummy/us-east-1/\n

    Copy the config files:

    cp -r shared/config apps-dummy/\n

    In config/account.tfvars change this:

    ## Environment Name\nenvironment = \"shared\"\n
    to this
    ## Environment Name\nenvironment = \"apps-dummy\"\n
    (note the environment is the same as the created dir)

    In config/backend.tfvars change this:

    profile = \"bm-shared-oaar\"\n
    to this:
    profile = \"bm-management\"\n

    "},{"location":"user-guide/cookbooks/VPC-with-no-LandingZone/#vpc-process","title":"VPC Process","text":"

    Copy the network layer:

    cp -r shared/us-east-1/base-network apps-dummy/us-east-1/\n

    Go into the layer:

    cd apps-dummy/us-east-1/base-network\n

    Since we are testing, we won't use the S3 backend (we didn't create the bucket, but you can do it easily with the base-tf-backend layer). So, comment out these lines in the config.tf file:

      #backend \"s3\" {\n#  key = \"shared/network/terraform.tfstate\"\n#}\n

    Initialize the layer:

    leverage tf init --skip-validation\n

    Note the skip-validation flag. This is needed because we are using a local tfstate.

    Plan it:

    leverage tf plan\n

    If you are happy (or you are unhappy but you are ok with the plan), apply it:

    leverage tf apply\n

    You should end up with something like this:

    Apply complete! Resources: 20 added, 0 changed, 0 destroyed.\n\nOutputs:\n\navailability_zones = [\n\"us-east-1a\",\n  \"us-east-1b\",\n]\nnat_gateway_ids = []\nprivate_route_table_ids = [\n\"rtb-065deXXXXXXX86b6d\",\n]\nprivate_subnets = [\n\"subnet-0aXXXXXXXXXXd80a6\",\n  \"subnet-0bXXXXXXXXXX0ff67\",\n]\nprivate_subnets_cidr = [\n\"172.18.0.0/21\",\n]\npublic_route_table_ids = [\n\"rtb-01XXXXXXXXXXXX887\",\n]\npublic_subnets = [\n\"subnet-0648XXXXXXXXX69\",\n  \"subnet-0297XXXXXXXXf10\",\n]\npublic_subnets_cidr = [\n\"172.18.8.0/21\",\n]\nvpc_cidr_block = \"172.18.0.0/20\"\nvpc_id = \"vpc-0aXXXXXXXXXX06d8f\"\nvpc_name = \"bm-apps-dummy-vpc\"\n

    "},{"location":"user-guide/cookbooks/VPC-with-no-LandingZone/#ec2-process","title":"EC2 Process","text":"

    Great, now we can go for the EC2!

    "},{"location":"user-guide/cookbooks/VPC-with-no-LandingZone/#get-the-layer","title":"Get the layer","text":"

    For this step we'll go for a layer that can be found in the binbash Leverage RefArch under this directory.

    You can download a directory from a git repository using this Firefox addon or any method you want.

    Note that when you copy the layer (e.g. with gitzip), the file common-variables.tf, which is a soft link, is probably copied as a regular file. If this happens, delete it:

    cd ec2-fleet-ansible\\ --\nrm common-variables.tf\n
    "},{"location":"user-guide/cookbooks/VPC-with-no-LandingZone/#prepare-the-layer","title":"Prepare the layer","text":"

    Again, since we are not running the whole binbash Leverage Landing Zone we need to comment out these lines in config.tf:

      #backend \"s3\" {\n#  key = \"apps-devstg/ec2-fleet-ansible/terraform.tfstate\"\n#}\n

    Also in this file, comment out these two resources:

    data \"terraform_remote_state\" \"security\" {\ndata \"terraform_remote_state\" \"vpc-shared\" {\n

    And change vpc to be like this:

    data \"terraform_remote_state\" \"vpc\" {\nbackend = \"local\"\nconfig = {\npath = \"../base-network/terraform.tfstate\"\n}\n}\n
    Again, since we are not using the full binbash Leverage capabilities, we are not using the S3 Terraform backend, thus the backend is local.

    In ec2_fleet.tf, update the module version like this:

      source = \"github.com/binbashar/terraform-aws-ec2-instance.git?ref=v5.5.0\"\n

    Init the layer:

    leverage tf init --skip-validation\n
    (same as before with the skip flag)

    Now we need some common and layer-specific variables that are not yet set.

    So, create a variables.tf file with this content:

    variable \"environment\" {\ntype        = string\ndescription = \"Environment Name\"\n}\nvariable \"profile\" {\ntype        = string\ndescription = \"AWS Profile (required by the backend but also used for other resources)\"\n}\nvariable \"region\" {\ntype        = string\ndescription = \"AWS Region\"\n}\n##=============================#\n##  EC2 Attributes             #\n##=============================#\nvariable \"aws_ami_os_id\" {\ntype        = string\ndescription = \"AWS AMI Operating System Identificator\"\ndefault     = \"ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*\"\n}\n\nvariable \"aws_ami_os_owner\" {\ntype        = string\ndescription = \"AWS AMI Operating System Owner, eg: 099720109477 for Canonical \"\ndefault     = \"099720109477\"\n}\n\n## security.tf file\n##=============================#\n##  SSM Attributes             #\n##=============================#\nvariable \"instance_profile\" {\ntype        = string\ndescription = \"Whether or not to create the EC2 profile, use null or 'true'\"\ndefault     = \"true\"\n}\n\nvariable \"prefix\" {\ntype        = string\ndescription = \"EC2 profile prefix\"\ndefault     = \"fleet-ansible\"\n}\n\nvariable \"name\" {\ntype        = string\ndescription = \"EC2 profile name\"\ndefault     = \"ssm-demo\"\n}\n\nvariable \"enable_ssm_access\" {\ntype        = bool\ndescription = \"Whether or not to attach SSM policy to EC2 profile IAM role, use false or true\"\ndefault     = true\n}\n
    (set the Ubuntu image as per your needs)

    In the ec2_fleet.tf file, comment out these lines:

     # data.terraform_remote_state.vpc-shared.outputs.vpc_cidr_block\n\n# key_name               = data.terraform_remote_state.security.outputs.aws_key_pair_name\n
    ...again, due to the lack of the whole binbash Leverage Landing Zone...

    If you plan to access the instance from the Internet (EC2 in a public subnet), e.g. to use Ansible, change the first line to \"0.0.0.0/0\" (or better, a specific public IP).

    If you want to add an SSH key (e.g. to use Ansible), you can generate a new SSH key and add a resource like this:

    resource \"aws_key_pair\" \"devops\" {\nkey_name   = \"devops-key\"\npublic_key = \"ssh-ed25519 AAAAC3N9999999999999999999999999eF Binbash-AWS-instances\"\n}\n
    And replace the line in ec2_fleet.tf with this one:
      key_name               = aws_key_pair.devops.key_name\n

    In the same file, change instance_type as per your needs.

    Also, you can add this to the ec2_ansible_fleet resource:

      create_spot_instance = true\n
    to create spot instances, and this:
      create_iam_instance_profile = true\niam_role_description        = \"IAM role for EC2 instance\"\niam_role_policies = {\nAmazonSSMManagedInstanceCore = \"arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore\"\n}\n
    to add SSM access.

    In the locals.tf file, check the variable multiple_instances. The EC2 instances are defined there; by default there are four. Remember to set the subnets in which the instances will be created.

    Finally, apply the layer:

    leverage tf apply\n

    Check your public IP and try to SSH into your new instance!
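    For example, a minimal sketch assuming you created the devops key shown above, kept its private part at ~/.ssh/devops-key, and the AMI uses the default ubuntu user:

    ssh -i ~/.ssh/devops-key ubuntu@<instance-public-ip>\n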

    Have fun!

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/","title":"How to add an external cluster to ArgoCD to manage it","text":""},{"location":"user-guide/cookbooks/argocd-external-cluster/#goal","title":"Goal","text":"

    Given an ArgoCD installation created with binbash Leverage Landing Zone using the EKS layer, add and manage an external Cluster.

    There can be a single ArgoCD instance for all clusters or multiple instances installed:

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#assumptions","title":"Assumptions","text":"

    We are assuming the binbash Leverage Landing Zone is deployed, two accounts called shared and apps-devstg were created and a region us-east-1 is being used. In any case you can adapt these examples to other scenarios.

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#requirements","title":"Requirements","text":"

    The target cluster must have IRSA enabled.

    If this cluster was created using the binbash Leverage Landing Zone EKS layer, this requirement is met.

    Also, the VPCs for both K8s clusters should be connected (e.g. VPC Peerings have to be in place between them).

    Info

    Learn how to create VPCPeerings using binbash Leverage Landing Zone here.

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#how-to","title":"How to","text":"

    There are a few ways to accomplish this. Here are two of them; you can find more here.

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#iam-roles","title":"IAM Roles","text":"

    First we need to understand how to do this.

    Given this diagram:

    this is the resulting workflow:

    This way, ArgoCD, from the source cluster, can deploy stuff into the target cluster.

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#steps","title":"Steps","text":"

    These steps were created to match two EKS clusters, in two AWS accounts, created using binbash Leverage Landing Zone.

    ArgoCD will be deployed in the shared account and will control the cluster in apps-devstg.

    Info

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#create-the-source-identities","title":"Create the source identities","text":"

    Info

    This has to be done in shared account.

    ArgoCD should be deployed using the shared/us-east-1/k8s-eks/k8s-components layer.

    The identities to be used by ArgoCD have to be updated in the shared/us-east-1/k8s-eks/identities layer.

    So, go into this layer and edit the ids_argocd.tf file.

    Here the ServiceAccounts used have to be modified to include all the possibilities in the argocd namespace:

    module \"role_argocd_devstg\" {\nsource = \"github.com/binbashar/terraform-aws-iam.git//modules/iam-assumable-role-with-oidc?ref=v5.37.1\"\n\ncreate_role  = true\nrole_name    = \"${local.environment}-argocd-devstg\"\nprovider_url = replace(data.terraform_remote_state.eks-cluster.outputs.cluster_oidc_issuer_url, \"https://\", \"\")\n\nrole_policy_arns              = [aws_iam_policy.argocd_devstg.arn]\noidc_fully_qualified_subjects = [\"system:serviceaccount:argocd-devstg:*\"]\n\ntags = local.tags_cluster_autoscaler\n}\n

    Note all the argocd namespace's ServiceAccounts were added to oidc_fully_qualified_subjects (because different ArgoCD components use different SAs), and they will be capable of assuming the role ${local.environment}-argocd-devstg. (Since we are working in shared, the role will be shared-argocd-devstg)

    This role lives in the shared account.

    Apply the layer:

    leverage tf apply\n

    Info

    Note this step creates a role and binds it to the in-cluster serviceaccounts.

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#create-the-target-role-and-change-the-aws_auth-config-map","title":"Create the target role and change the aws_auth config map","text":"

    Info

    This has to be done in apps-devstg account.

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#create-the-role","title":"Create the role","text":"

    Go into the apps-devstg/global/base-identities layer.

    In file roles.tf add this resource:

    module \"iam_assumable_role_argocd\" {\nsource = \"github.com/binbashar/terraform-aws-iam.git//modules/iam-assumable-role?ref=v4.1.0\"\n\ntrusted_role_arns = [\n\"arn:aws:iam::${var.accounts.shared.id}:root\"\n]\n\ncreate_role = true\nrole_name   = \"ArgoCD\"\nrole_path   = \"/\"\n\n  #\n  # MFA setup\n  #\nrole_requires_mfa    = false\nmfa_age              = 43200 # Maximum CLI/API session duration in seconds between 3600 and 43200\nmax_session_duration = 3600  # Max age of valid MFA (in seconds) for roles which require MFA\ncustom_role_policy_arns = [\n]\n\ntags = local.tags\n}\n

    Note MFA is deactivated since this is a programmatic access role. Also, no policies are added since we just need to assume it to access the cluster.

    Apply the layer:

    leverage tf apply\n

    Info

    This step will add a role that can be assumed from the shared account.

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#update-the-aws_auth-config-map","title":"Update the aws_auth config map","text":"

    cd into layer apps-devstg/us-east-1/k8s-eks/cluster.

    Edit the locals.tf file; under the map_roles list add this:

        {\nrolearn  = \"arn:aws:iam::${var.accounts.apps-devstg.id}:role/ArgoCD\"\nusername = \"ArgoCD\"\ngroups   = [\"system:masters\"]\n},\n

    You can narrow the access by modifying groups as per your own needs.

    Apply the layer:

    leverage tf apply\n

    To recover the API Server run this:

    APISERVER=$(leverage kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | sed -E '/^\\[/d')\n

    Info

    This step will add the role-k8sgroup binding.

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#create-the-external-cluster-in-argocd","title":"Create the external cluster in ArgoCD","text":"

    Info

    This has to be done in shared account.

    In the shared/us-east-1/k8s-eks/k8s-components layer, modify the cicd-argocd.tf and chart-values/argocd.yaml files, adding this to the first one:

    ##------------------------------------------------------------------------------\n## ArgoCD DEVSTG: GitOps + CD\n##------------------------------------------------------------------------------\nresource \"helm_release\" \"argocd_devstg\" {\ncount      = var.enable_argocd_devstg ? 1 : 0\nname       = \"argocd-devstg\"\nnamespace  = kubernetes_namespace.argocd_devstg[0].id\nrepository = \"https://argoproj.github.io/argo-helm\"\nchart      = \"argo-cd\"\nversion    = \"6.7.3\"\nvalues = [\ntemplatefile(\"chart-values/argocd.yaml\", {\nargoHost      = \"argocd-devstg.${local.environment}.${local.private_base_domain}\"\ningressClass  = local.private_ingress_class\nclusterIssuer = local.clusterissuer_vistapath\nroleArn       = data.terraform_remote_state.eks-identities.outputs.argocd_devstg_role_arn\nremoteRoleARN = \"role\"\nremoteClusterName   = \"clustername\"\nremoteServer  = \"remoteServer\"\nremoteName    = \"remoteName\"\nremoteClusterCertificate = \"remoteClusterCertificate\"\n}),\n    # We are using a different approach here because it is very tricky to render\n    # properly the multi-line sshPrivateKey using 'templatefile' function\nyamlencode({\nconfigs = {\nsecret = {\nargocd_devstgServerAdminPassword = data.sops_file.secrets.data[\"argocd_devstg.serverAdminPassword\"]\n}\n        # Grant Argocd_Devstg access to the infrastructure repo via private SSH key\nrepositories = {\nwebapp = {\nname          = \"webapp\"\nproject       = \"default\"\nsshPrivateKey = data.sops_file.secrets.data[\"argocd_devstg.webappRepoDeployKey\"]\ntype          = \"git\"\nurl           = \"git@github.com:VistaPath/webapp.git\"\n}\n}\n}\n      # Enable SSO via Github\nserver = {\nconfig = {\nurl          = \"https://argocd_devstg.${local.environment}.${local.private_base_domain}\"\n\"dex.config\" = data.sops_file.secrets.data[\"argocd_devstg.dexConfig\"]\n}\n}\n})\n]\n}\n

    Note these lines:

          remoteRoleARN = \"role\"\nremoteClusterName   = \"clustername\"\nremoteServer  = \"remoteServer\"\nremoteName    = \"remoteName\"\nremoteClusterCertificate = \"remoteClusterCertificate\"\n

    Dictionary:

    And this in the second file:

    configs:\nclusterCredentials:\n- name: ${remoteName}\nserver: ${remoteServer}\nlabels: {}\nannotations: {}\nnamespaces: namespace1,namespace2\nclusterResources: false\nconfig:\nawsAuthConfig:\nclusterName: ${remoteClusterName}\nroleARN: ${remoteRoleARN}\ntlsClientConfig:\ninsecure: false\ncaData: ${remoteClusterCertificate}\n

    clusterResources false is set so that ArgoCD is prevented from managing cluster-level resources.

    namespaces scopes the namespaces in which ArgoCD can deploy resources.

    Apply the layer:

    leverage tf apply\n

    Info

    This step will create the external-cluster configuration for ArgoCD. Now you can see the cluster in the ArgoCD web UI.

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#bearer-tokens","title":"Bearer Tokens","text":"

    This is a simpler method than the previous one, but it is also less secure.

    It uses a bearer token, which should be rotated periodically (either manually or with a custom process).

    Given this diagram:

    ArgoCD will call the target cluster directly using the bearer token as authentication.

    So, these are the steps:

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#create-the-serviceaccount","title":"Create the ServiceAccount","text":"

    Info

    This has to be done in apps-devstg account.

    There are two ways to grant access. Cluster level or namespace scoped.

    If namespace scoped, a ServiceAccount, a Role and a RoleBinding are needed to grant ArgoCD access to the target cluster. If cluster level, then a ServiceAccount, a ClusterRole and a ClusterRoleBinding are needed. The former requires the namespaces to be created beforehand. The latter allows ArgoCD to create the namespaces.

    In the target cluster identities layer at apps-devstg/us-east-1/k8s-eks/identities create a tf file and add this:

    The following example is for the namespace-scoped way.

    locals {\n  # namespaces ArgoCD has to manage\nnamespaces = toset([\"test\"])\n}\nprovider  kubernetes {\nhost                   = data.aws_eks_cluster.cluster.endpoint\ncluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)\ntoken                  = data.aws_eks_cluster_auth.cluster.token\n}\ndata \"aws_eks_cluster\" \"cluster\" {\nname = data.terraform_remote_state.eks-cluster.outputs.cluster_name\n}\n\ndata \"aws_eks_cluster_auth\" \"cluster\" {\nname = data.terraform_remote_state.eks-cluster.outputs.cluster_name\n}\n\nresource \"kubernetes_service_account\" \"argocd-managed\" {\nfor_each = local.namespaces\n\nmetadata {\nname = \"argocd-managed\"\nnamespace = each.key\n}\n}\n\nresource \"kubernetes_secret\" \"argocd-managed\" {\nfor_each = local.namespaces\n\nmetadata {\nannotations = {\n\"kubernetes.io/service-account.name\" = kubernetes_service_account.argocd-managed[each.key].metadata.0.name\n}\n\ngenerate_name = \"argocd-managed-\"\nnamespace = each.key\n}\n\ntype                           = \"kubernetes.io/service-account-token\"\nwait_for_service_account_token = true\n}\n\nresource \"kubernetes_role\" \"argocd-managed\" {\nfor_each = local.namespaces\n\nmetadata {\nname      = \"argocd-managed-role\"\nnamespace = each.key\n}\n\nrule {\napi_groups= [\"*\"]\nresources= [\"*\"]\nverbs= [\"*\"]\n}\n}\n\nresource \"kubernetes_role_binding\" \"argocd-managed\" {\nfor_each = local.namespaces\n\nmetadata {\nname      = \"${kubernetes_role.argocd-managed[each.key].metadata[0].name}-binding\"\nnamespace = each.key\n}\n\nrole_ref {\napi_group = \"rbac.authorization.k8s.io\"\nkind      = \"Role\"\nname      = kubernetes_role.argocd-managed[each.key].metadata[0].name\n}\nsubject {\nkind      = \"ServiceAccount\"\nname      = \"argocd-managed\"\nnamespace = each.key\n}\n}\n

    The following example is for the cluster-level way.

    provider  kubernetes {\nhost                   = data.aws_eks_cluster.cluster.endpoint\ncluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)\ntoken                  = data.aws_eks_cluster_auth.cluster.token\n}\ndata \"aws_eks_cluster\" \"cluster\" {\nname = data.terraform_remote_state.eks-cluster.outputs.cluster_name\n}\n\ndata \"aws_eks_cluster_auth\" \"cluster\" {\nname = data.terraform_remote_state.eks-cluster.outputs.cluster_name\n}\n\nresource \"kubernetes_service_account\" \"argocd-managed\" {\nmetadata {\nname = \"argocd-managed\"\nnamespace = \"kube-system\"\n}\n}\n\nresource \"kubernetes_secret\" \"argocd-managed\" {\nmetadata {\nannotations = {\n\"kubernetes.io/service-account.name\" = kubernetes_service_account.argocd-managed.metadata.0.name\n}\n\ngenerate_name = \"argocd-managed-\"\nnamespace = \"kube-system\"\n}\n\ntype                           = \"kubernetes.io/service-account-token\"\nwait_for_service_account_token = true\n}\n\nresource \"kubernetes_cluster_role\" \"argocd-managed\" {\nmetadata {\nname      = \"argocd-managed-role\"\n}\n\nrule {\napi_groups= [\"*\"]\nresources= [\"*\"]\nverbs= [\"*\"]\n}\n}\n\nresource \"kubernetes_cluster_role_binding\" \"argocd-managed\" {\nmetadata {\nname      = \"${kubernetes_role.argocd-managed.metadata[0].name}-binding\"\n}\n\nrole_ref {\napi_group = \"rbac.authorization.k8s.io\"\nkind      = \"ClusterRole\"\nname      = kubernetes_role.argocd-managed.metadata[0].name\n}\nsubject {\nkind      = \"ServiceAccount\"\nname      = \"argocd-managed\"\nnamespace = \"kube-system\"\n}\n}\n

    Info

    This step will create a ServiceAccount, a Role with the needed permissions, the RoleBinding and the secret with the token (or a ClusterRole and ClusterRoleBinding). Also, multiple namespaces can be specified for the namespace-scoped way.

    To recover the token and the API Server run this:

    NAMESPACE=test\nSECRET=$(leverage kubectl get secret -n ${NAMESPACE} -o jsonpath='{.items[?(@.metadata.generateName==\\\"argocd-managed-\\\")].metadata.name}' | sed -E '/^\\[/d')\nTOKEN=$(leverage kubectl get secret ${SECRET} -n ${NAMESPACE} -o jsonpath='{.data.token}' | sed -E '/^\\[/d' | base64 --decode)\nAPISERVER=$(leverage kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | sed -E '/^\\[/d')\n
    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#create-the-external-cluster-in-argocd_1","title":"Create the external cluster in ArgoCD","text":"

    Info

    This has to be done in shared account.

    In the shared/us-east-1/k8s-eks/k8s-components layer, modify the cicd-argocd.tf and chart-values/argocd.yaml files, adding this to the first one:

    ##------------------------------------------------------------------------------\n## ArgoCD DEVSTG: GitOps + CD\n##------------------------------------------------------------------------------\nresource \"helm_release\" \"argocd_devstg\" {\ncount      = var.enable_argocd_devstg ? 1 : 0\nname       = \"argocd-devstg\"\nnamespace  = kubernetes_namespace.argocd_devstg[0].id\nrepository = \"https://argoproj.github.io/argo-helm\"\nchart      = \"argo-cd\"\nversion    = \"6.7.3\"\nvalues = [\ntemplatefile(\"chart-values/argocd.yaml\", {\nargoHost                 = \"argocd-devstg.${local.environment}.${local.private_base_domain}\"\ningressClass             = local.private_ingress_class\nclusterIssuer            = local.clusterissuer_vistapath\nroleArn                  = data.terraform_remote_state.eks-identities.outputs.argocd_devstg_role_arn\nremoteServer             = \"remoteServer\"\nremoteName               = \"remoteName\"\nremoteClusterCertificate = \"remoteClusterCertificate\"\nbearerToken              = \"bearerToken\"\n}),\n    # We are using a different approach here because it is very tricky to render\n    # properly the multi-line sshPrivateKey using 'templatefile' function\nyamlencode({\nconfigs = {\nsecret = {\nargocd_devstgServerAdminPassword = data.sops_file.secrets.data[\"argocd_devstg.serverAdminPassword\"]\n}\n        # Grant Argocd_Devstg access to the infrastructure repo via private SSH key\nrepositories = {\nwebapp = {\nname          = \"webapp\"\nproject       = \"default\"\nsshPrivateKey = data.sops_file.secrets.data[\"argocd_devstg.webappRepoDeployKey\"]\ntype          = \"git\"\nurl           = \"git@github.com:VistaPath/webapp.git\"\n}\n}\n}\n      # Enable SSO via Github\nserver = {\nconfig = {\nurl          = \"https://argocd_devstg.${local.environment}.${local.private_base_domain}\"\n\"dex.config\" = data.sops_file.secrets.data[\"argocd_devstg.dexConfig\"]\n}\n}\n})\n]\n}\n

    Note these lines:

          remoteServer  = \"remoteServer\"\nremoteName    = \"remoteName\"\nremoteClusterCertificate = \"remoteClusterCertificate\"\nbearerToken = \"bearerToken\"\n

    Dictionary:

    And this in the second file:

    configs:\nclusterCredentials:\n- name: ${remoteName}\nserver: ${remoteServer}\nlabels: {}\nannotations: {}\nnamespaces: namespace1,namespace2\nclusterResources: false\nconfig:\nbearerToken: ${bearerToken}\ntlsClientConfig:\ninsecure: false\ncaData: ${remoteClusterCertificate}\n

    clusterResources false is set so that ArgoCD is prevented from managing cluster-level resources.

    namespaces scopes the namespaces in which ArgoCD can deploy resources.

    Apply the layer:

    leverage tf apply\n

    Info

    This step will create the external-cluster configuration for ArgoCD. Now you can see the cluster in the ArgoCD web UI.

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#deploying-stuff-to-the-target-cluster","title":"Deploying stuff to the target cluster","text":"

    To deploy an App to a given cluster, these lines have to be added to the manifest:

        spec:\ndestination:\nserver: \"https://kubernetes.default.svc\"\nnamespace: \"appnamespace\"\n

    Here, spec.destination.server corresponds to config.clusterCredentials[*].server in ArgoCD's external cluster secret.

    "},{"location":"user-guide/cookbooks/argocd-external-cluster/#references","title":"References","text":"

    ArgoCD documentation.

    "},{"location":"user-guide/cookbooks/enable-nat-gateway/","title":"Enable nat-gateway using binbash Leverage","text":""},{"location":"user-guide/cookbooks/enable-nat-gateway/#goal","title":"Goal","text":"

    To activate the NAT Gateway in a VPC created using binbash Leverage Landing Zone.

    "},{"location":"user-guide/cookbooks/enable-nat-gateway/#assumptions","title":"Assumptions","text":"

    We are assuming the binbash Leverage Landing Zone is deployed, an account called apps-devstg was created and a region us-east-1 is being used. In any case you can adapt these examples to other scenarios.

    "},{"location":"user-guide/cookbooks/enable-nat-gateway/#how-to","title":"How to","text":"

    Go into your account/region/network layer:

    cd apps-devstg/us-east-1/base-network\n

    Info

    if you called the layer something other than this, please set the right dir here

    Check that a file called terraform.auto.tfvars exists. If it does not, create it.

    Edit the file and set this content:

    vpc_enable_nat_gateway = true\n

    Apply the layer as usual:

    leverage tf apply\n
    "},{"location":"user-guide/cookbooks/enable-nat-gateway/#how-to-disable-the-nat-gateway","title":"How to disable the nat gateway","text":"

    Do the same as before but setting this in the tfvars file:

    vpc_enable_nat_gateway = false\n
    "},{"location":"user-guide/cookbooks/k8s/","title":"Kubernetes for different stages of your projects","text":""},{"location":"user-guide/cookbooks/k8s/#goal","title":"Goal","text":"

    When starting a project using Kubernetes, usually a lot of testing is done.

    Also, as a startup, the project is trying to save costs (since probably no clients, or just a few, are using the product yet).

    To achieve this, we suggest the following path:

    For a lot of projects, Step 1 is enough to run on!

    In what follows, we'll explore the three options.

    "},{"location":"user-guide/cookbooks/k8s/#assumptions","title":"Assumptions","text":"

    We are assuming the binbash Leverage Landing Zone is deployed, an account called apps-devstg was created and a region us-east-1 is being used. In any case you can adapt these examples to other scenarios.

    "},{"location":"user-guide/cookbooks/k8s/#k3s","title":"K3s","text":""},{"location":"user-guide/cookbooks/k8s/#goal_1","title":"Goal","text":"

    A cluster with one node (master/worker) is deployed here.

    Cluster autoscaler can be used with K3s to scale nodes, but it requires a lot of work, which justifies moving to KOPS instead.

    [TBD]

    "},{"location":"user-guide/cookbooks/k8s/#kops","title":"KOPS","text":"

    See also here.

    "},{"location":"user-guide/cookbooks/k8s/#goal_2","title":"Goal","text":"

    A gossip cluster (not exposed to the Internet; an Internet-exposed cluster can be created using Route53) with a master node and a worker node (with node autoscaling capabilities) will be deployed here.

    More master nodes can be deployed (e.g. one per AZ; three are actually recommended for production-grade clusters).

    It will be something similar to what is stated here, but with one master, one worker, and the LB for the API in the private network.

    "},{"location":"user-guide/cookbooks/k8s/#procedure","title":"Procedure","text":"

    These are the steps:

    Ok, take it easy, now let's go through the steps in detail.

    "},{"location":"user-guide/cookbooks/k8s/#0-copy-the-layer","title":"0 - Copy the layer","text":"

    A few methods can be used to download the KOPS layer directory into the binbash Leverage project.

    E.g. this addon is a nice way to do it.

    Paste this layer into the account/region chosen to host this, e.g. apps-devstg/us-east-1/, so the final layer is apps-devstg/us-east-1/k8s-kops/.

    Warning

    Do not change the 1-prerequisites, 2-kops, 3-extras dir names since scripts depend on these!

    "},{"location":"user-guide/cookbooks/k8s/#1-prerequisites","title":"1 - Prerequisites","text":"

    To create the KOPS cluster, these are the prerequisites:

    Warning

    If the nat-gateway is not in place, check how to enable it using the binbash Leverage network layer here.

    Warning

    If you are going to activate Karpenter, you need to tag the target subnets (i.e. the private subnets in your VPC) with:

        \"kops.k8s.io/instance-group/nodes\"     = \"true\"\n\"kubernetes.io/cluster/<cluster-name>\" = \"true\"\n
We are assuming here that the worker Instance Group is called nodes. If you change the name or have more than one Instance Group, you need to adapt the first tag (see the sketch below).
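
As an illustrative sketch only, these tags could be applied with the AWS CLI; the subnet IDs and the cluster name below are placeholders you must replace with your own:

leverage aws ec2 create-tags --resources subnet-0aaa1111 subnet-0bbb2222 --tags 'Key=kops.k8s.io/instance-group/nodes,Value=true' 'Key=kubernetes.io/cluster/<cluster-name>,Value=true'\n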

    Info

Note that DNS is not needed since this will be a gossip cluster.

    Info

A new bucket is created so KOPS can store its state there.

By default, the account base network is used. If you want to change this, check/modify this resource in the config.tf file:

    data \"terraform_remote_state\" \"vpc\" {\n

Also, the shared VPC will be allowed to send incoming traffic here. This is because, with the binbash Leverage Landing Zone defaults, the VPN server is created there.

    cd into the 1-prerequisites directory.

    Open the locals.tf file.

    Here these items can be updated:

    Open the config.tf file.

    Here set the backend key if needed:

      backend \"s3\" {\nkey = \"apps-devstg/us-east-1/k8s-kops/prerequisites/terraform.tfstate\"\n}\n

    Info

Remember binbash Leverage has its own rules for this: the key name should match <account-name>/[<region>/]<layer-name>/<sublayer-name>/terraform.tfstate.

    Init and apply as usual:

    leverage tf init\nleverage tf apply\n

    Warning

You will be prompted to enter the ssh_pub_key_path. Enter the full path (e.g. /home/user/.ssh/thekey.pub) to your public SSH key and hit enter. A key managed by KMS can be used here; a regular key-in-a-file is used for this example, but you can change it as per your needs.
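
If you don't have a key pair yet, a minimal sketch to generate one (the path and comment are just examples):

ssh-keygen -t ed25519 -f /home/user/.ssh/thekey -C 'kops-cluster'\n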

    Info

Note that if for some reason the NAT gateway changes, this layer has to be applied again.

    Info

Note the role AWSReservedSSO_DevOps (the one created in the SSO for DevOps) is added as system:masters. If you want to change the role, check the devopsrole in the data.tf file.

    "},{"location":"user-guide/cookbooks/k8s/#2-apply-the-cluster-with-kops","title":"2 - Apply the cluster with KOPS","text":"

    cd into the 2-kops directory.

    Open the config.tf file and edit the backend key if needed:

      backend \"s3\" {\nkey = \"apps-devstg/us-east-1/k8s-kops/terraform.tfstate\"\n}\n

    Info

Remember binbash Leverage has its own rules for this: the key name should match <account-name>/[<region>/]<layer-name>/<sublayer-name>/terraform.tfstate.

    Info

    If you want to check the configuration:

    make cluster-template\n

The final template is stored in the cluster.yaml file.

    If you are happy with the config (or you are not happy but you think the file is ok), let's create the Terraform files!

    make cluster-update\n

    Finally, apply the layer:

    leverage tf init\nleverage tf apply\n

The cluster can be checked with this command:

    make kops-cmd KOPS_CMD=\"validate cluster\"\n
    "},{"location":"user-guide/cookbooks/k8s/#accessing-the-cluster","title":"Accessing the cluster","text":"

There are two questions here.

    One is how to expose the cluster so Apps running in it can be reached.

    The other one is how to access the cluster's API.

    For the first one:

    since this is a `gossip-cluster` and as per the KOPS docs: When using gossip mode, you have to expose the kubernetes API using a loadbalancer. Since there is no hosted zone for gossip-based clusters, you simply use the load balancer address directly. The user experience is identical to standard clusters. kOps will add the ELB DNS name to the kops-generated kubernetes configuration.\n

So, we need to create an LB with public access.

For the second one, we need to connect to the VPN (we previously granted it access to the network in use) and hit the LB. A Load Balancer was deployed along with the cluster so you can reach the K8s API.

    "},{"location":"user-guide/cookbooks/k8s/#access-the-api","title":"Access the API","text":"

    Run:

    make kops-kubeconfig\n

A file named after the cluster is created with the kubeconfig content (admin user, so keep it safe). Export it and use it!

    export KUBECONFIG=$(pwd)/clustername.k8s.local\nkubectl get ns\n

    Warning

    You have to be connected to the VPN to reach your cluster!

    "},{"location":"user-guide/cookbooks/k8s/#access-apps","title":"Access Apps","text":"

You should use some sort of ingress controller (e.g. Traefik, Nginx) and define ingresses for the apps (see Extras).

    "},{"location":"user-guide/cookbooks/k8s/#3-extras","title":"3 - Extras","text":"

    Copy the KUBECONFIG file to the 3-extras directory, e.g.:

    cp ${KUBECONFIG} ../3-extras/\n

    cd into 3-extras.

Set the name of this file and the context in the config.tf file.

    Set what extras you want to install (e.g. traefik = true) and run the layer as usual:

    leverage tf init\nleverage tf apply\n

    You are done!

    "},{"location":"user-guide/cookbooks/k8s/#eks","title":"EKS","text":"

    See also here.

    "},{"location":"user-guide/cookbooks/k8s/#goal_3","title":"Goal","text":"

    A cluster with one node (worker) and the control plane managed by AWS is deployed here.

    Cluster autoscaler is used to create more nodes.

    "},{"location":"user-guide/cookbooks/k8s/#procedure_1","title":"Procedure","text":"

    These are the steps:

    "},{"location":"user-guide/cookbooks/k8s/#0-copy-the-layer_1","title":"0 - Copy the layer","text":"

A few methods can be used to download the EKS layer directory into the binbash Leverage project.

    E.g. this addon is a nice way to do it.

    Paste this layer into the account/region chosen to host this, e.g. apps-devstg/us-east-1/, so the final layer is apps-devstg/us-east-1/k8s-eks/.

    "},{"location":"user-guide/cookbooks/k8s/#1-apply-layers","title":"1 - Apply layers","text":"

First, go into each layer and configure the Terraform S3 backend key, the CIDR for the network, names, addons, etc.

Then apply the layers as follows:

    leverage tf init --layers network,cluster,identities,addons,k8s-components\nleverage tf apply --layers network,cluster,identities,addons,k8s-components\n
    "},{"location":"user-guide/cookbooks/schedule-start-stop-ec2/","title":"Start/Stop EC2/RDS instances using schedule or manual endpoint","text":""},{"location":"user-guide/cookbooks/schedule-start-stop-ec2/#what","title":"What?","text":"

You have EC2 (or RDS) instances that are not being used all the time... so why keep them up, running, and billing? Here we'll create a simple schedule to turn them off/on (also with an HTTP endpoint to trigger it manually).

    "},{"location":"user-guide/cookbooks/schedule-start-stop-ec2/#why","title":"Why?","text":"

    To keep your billing under control!

    "},{"location":"user-guide/cookbooks/schedule-start-stop-ec2/#how","title":"How?","text":""},{"location":"user-guide/cookbooks/schedule-start-stop-ec2/#pre-requisites","title":"Pre-requisites","text":"

All the instances you want to start/stop have to be tagged accordingly. For this example we'll use these tags:

        ScheduleStopDaily   = true\nScheduleStartManual = true\n
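
As an illustrative sketch, an instance could be tagged from the command line like this (the instance ID is a placeholder):

leverage aws ec2 create-tags --resources i-0123456789abcdef0 --tags Key=ScheduleStopDaily,Value=true Key=ScheduleStartManual,Value=true\n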
    "},{"location":"user-guide/cookbooks/schedule-start-stop-ec2/#the-scheduler-layer","title":"The scheduler layer","text":"

    In your binbash Leverage infra repository, under your desired account and region, copy this layer.

    You can download a directory from a git repository using this Firefox addon or any method you want.

Remember: if the layer includes a common-variables.tf file, delete it and soft-link it to the homonymous file in the root config dir, e.g. common-variables.tf -> ../../../config/common-variables.tf (see the sketch below).
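
A minimal sketch of that step from within the layer directory (the relative path depends on your repository layout):

rm common-variables.tf\nln -s ../../../config/common-variables.tf common-variables.tf\n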

    "},{"location":"user-guide/cookbooks/schedule-start-stop-ec2/#set-the-tags","title":"Set the tags","text":"

In the tools-cloud-scheduler-stop-start layer edit the main.tf file. There are two resources: schedule_ec2_stop_daily_midnight to stop the instances, and schedule_ec2_start_daily_morning to start the instances.

You can change these names. If you do so, remember to update all references to them.

In the resources_tag element set the right tags, e.g. this:

      resources_tag = {\nkey   = \"ScheduleStopDaily\"\nvalue = \"true\"\n}\n
    in the schedule_ec2_stop_daily_midnight resource means this resource will stop instances with tag: ScheduleStopDaily=true.

    "},{"location":"user-guide/cookbooks/schedule-start-stop-ec2/#set-the-schedule","title":"Set the schedule","text":"

    Note this line:

      cloudwatch_schedule_expression = \"cron(0 23 * * ? *)\"\n

    Here you can set the schedule in a cron-like fashion.

If it is none, no schedule will be created (e.g. if you only need the HTTP endpoint):

      cloudwatch_schedule_expression = \"none\"\n

    Then if you set this:

      http_trigger = true\n

An HTTP endpoint will be created to trigger the corresponding action.

If an endpoint was created, its URL will be shown in the outputs.

    "},{"location":"user-guide/cookbooks/sops-kms/","title":"Encrypt and decrypt SOPS files with AWS KMS","text":""},{"location":"user-guide/cookbooks/sops-kms/#goal","title":"Goal","text":"

    Using a SOPS file to store secrets in the git repository.

    Encrypting the SOPS file with a KMS key.

    "},{"location":"user-guide/cookbooks/sops-kms/#assumptions","title":"Assumptions","text":"

We are assuming the binbash Leverage Landing Zone is deployed, an account called apps-devstg was created, and the us-east-1 region is being used. In any case, you can adapt these examples to other scenarios.

    "},{"location":"user-guide/cookbooks/sops-kms/#prerequisites","title":"Prerequisites","text":""},{"location":"user-guide/cookbooks/sops-kms/#sops","title":"SOPS","text":"

    To know more about SOPS read here.

    "},{"location":"user-guide/cookbooks/sops-kms/#how-to","title":"How to","text":"

We will be using the binbash Leverage shell command to achieve this.

    "},{"location":"user-guide/cookbooks/sops-kms/#the-source-file","title":"The source file","text":"

First, in the layer where you need the SOPS file, create a sample YAML file, e.g. secrets.yaml:

    topic:\nsubtopic: value\n
    "},{"location":"user-guide/cookbooks/sops-kms/#access-the-shell","title":"Access the shell","text":"

    First, be sure your credentials are up to date. You can run a leverage tf plan and they will be updated.

    Run the shell command:

    leverage shell --mount /{path-to-your-tools-directory}/sops/ /extrabin\n
    "},{"location":"user-guide/cookbooks/sops-kms/#encrypt-the-file","title":"Encrypt the file","text":"

Note that for encrypting you need to specify an AWS profile. In the binbash Leverage context, profiles look like this: {short-project-name}-{account}-{role}. For example, for my apps-devstg account, using the devops role, in my bb project, the profile is bb-apps-devstg-devops.

    From the new shell encrypt your file:

    AWS_PROFILE=bb-apps-devstg-devops /extrabin/sops --encrypt --kms {your-kms-arn-here} secrets.yaml > secrets.enc.yaml\n

    Info

Since the binbash Leverage Landing Zone is being used, the default key for the account+region has an alias: ${var.project}_${var.environment}_${var.kms_key_name}_key, in this case vp_apps-devstg_default_key, so arn:aws:kms:<region>:<account>:alias/vp_apps-devstg_default_key should be used.
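
If you need the full key ARN, one way to look it up is with the AWS CLI (a sketch; adjust the profile and alias to your project):

leverage aws kms describe-key --key-id alias/vp_apps-devstg_default_key --profile bb-apps-devstg-devops\n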

    Info

To use this file with Terraform, edit secrets.enc.yaml and, at the bottom, set the aws_profile line to the AWS profile you used to encrypt the file.

    "},{"location":"user-guide/cookbooks/sops-kms/#decrypt-the-file","title":"Decrypt the file","text":"

    From the shell decrypt your file:

    AWS_PROFILE=bb-apps-devstg-devops /extrabin/sops --decrypt secrets.enc.yaml\n
    "},{"location":"user-guide/cookbooks/sops-kms/#how-to-use-it-with-leverage","title":"How to use it with Leverage","text":"

    Now that the secret is stored in a secure file, it can be used.

The example here is pretty simple: just getting the value and sending it to an output. But it can be used in any other resource.

First, your user (the one used to run binbash Leverage) needs access to the KMS key that was used.

    Then, open the file:

    data \"sops_file\" \"secrets\" {\nsource_file = \"secrets.enc.yaml\"\n}\n

    ...and use it:

    output \"thevalue\" {\nvalue = data.sops_file.secrets.data[\"topic.subtopic\"]\n}\n

    "},{"location":"user-guide/infra-as-code-library/infra-as-code-library-forks/","title":"Leverage Open Source Modules management.","text":"

We\u2019ll fork every Infrastructure as Code (IaC) Library dependency repo. Why?

    Grant full governance over the lib repositories

    Collaborators considerations

    "},{"location":"user-guide/infra-as-code-library/infra-as-code-library-specs/","title":"Tech Specifications","text":"As Code: Hundred of thousands lines of code

    Written in:

Stop reinventing the wheel: an automated and fully-as-code DoD of highly reusable, configurable, and composable sub-modules

    Which will be 100%

    Solutions must be versioned

So as to be able to manage them as a software product with releases and a changelog. This way we'll be able to know which version is currently deployed for a given client and consider upgrading it.

    Env Parity

Promote immutable, versioned infra modules reused across envs.

    Updated

    Continuously perform updates, additions, and fixes to libraries and modules.

    Orchestrated in automation

We use the leverage-cli for this purpose.

    Proven & Tested

Every commit goes through a suite of automated tests to enforce code styling and functional correctness.

    Cost savings by design

The architecture of our Library / Code Modules helps an organization analyze its current IT and DevSecOps Cloud strategy and identify areas where changes could lead to cost savings. For instance, the architecture may show that multiple database systems could be consolidated so only one product is used, reducing software and support costs. It also provides a basis for reuse: the process of architecting can support both the use and creation of reusable assets. Reusable assets are beneficial for an organization since they can reduce the overall cost of a system and improve its quality, given that a reusable asset has already been proven.

    Full Code Access & No Lock-In

You get access to 100% of the code under an Open Source license; if you choose to discontinue the direct support of the binbash Leverage team, you keep rights to all the code.

    Documented

Includes code examples, use cases and thorough documentation, such as README.md files, --help commands, docstrings and inline comments.

    Supported & Customizable

    Commercially maintained and supported by binbash.

    "},{"location":"user-guide/infra-as-code-library/modules-library-by-technology/","title":"Modules by Technology","text":""},{"location":"user-guide/infra-as-code-library/modules-library-by-technology/#open-source-modules-repos","title":"Open Source Modules Repos","text":"Category URLs Ansible Galaxy Roles bb-leverage-ansible-roles-list Dockerfiles bb-leverage-dockerfiles-list Helm Charts bb-leverage-helm-charts-list Terraform Modules bb-leverage-terraform-modules-list"},{"location":"user-guide/infra-as-code-library/modules-library-by-technology/#open-source-private-modules-repos-via-github-teams","title":"Open Source + Private Modules Repos (via GitHub Teams)","text":"Repositories Details Reference Architecture Most of the AWS resources are here, divided by account. Dockerfiles These are Terraform module we created/imported to build reusable resources / stacks. Ansible Playbooks & Roles Playbooks we use for provisioning servers such as Jenkins, Spinnaker, Vault, and so on. Helm Charts Complementary Jenkins pipelines to clean docker images, unseal Vault, and more. Also SecOps jobs can be found here. Terraform Modules Jenkins pipelines, docker images, and other resources used for load testing."},{"location":"user-guide/infra-as-code-library/overview/","title":"Infrastructure as Code (IaC) Library","text":""},{"location":"user-guide/infra-as-code-library/overview/#overview","title":"Overview","text":"

    A collection of reusable, tested, production-ready E2E infrastructure as code solutions, leveraged by modules written in Terraform, Ansible, Dockerfiles, Helm charts and Makefiles.

    "},{"location":"user-guide/infra-as-code-library/overview/#model","title":"Model","text":"

    Our development model is strongly based on code reusability.

    "},{"location":"user-guide/infra-as-code-library/overview/#reusability","title":"Reusability","text":"

High level summary of the code reusability efficiency.

    Considerations

The percentages detailed above are to be seen as estimates:

    "},{"location":"user-guide/infra-as-code-library/overview/#modules","title":"Modules","text":"

    Infrastructure as Code (IaC) Library development and implementation workflow.

    "},{"location":"user-guide/leverage-cli/basic-features/","title":"Basic CLI features","text":"

    To view a list of all the available commands and options in your current Leverage version simply run leverage or leverage --help. You should get an output similar to this:

    $ leverage\nUsage: leverage [OPTIONS] COMMAND [ARGS]...\n\n  Leverage Reference Architecture projects command-line tool.\n\nOptions:\n  -f, --filename TEXT  Name of the build file containing the tasks\n                       definitions.  [default: build.py]\n-l, --list-tasks     List available tasks to run.\n  -v, --verbose        Increase output verbosity.\n  --version            Show the version and exit.\n  --help               Show this message and exit.\n\nCommands:\n  aws          Run AWS CLI commands in a custom containerized environment.\n  credentials  Manage AWS cli credentials.\n  kc           Run Kubectl commands in a custom containerized environment.\n  kubectl      Run Kubectl commands in a custom containerized environment.\n  project      Manage a Leverage project.\n  run          Perform specified task(s) and all of its dependencies.\n  shell        Run a shell in a generic container.\n  terraform    Run Terraform commands in a custom containerized...\n  tf           Run Terraform commands in a custom containerized...\n  tfautomv     Run TFAutomv commands in a custom containerized...\n

    Similarly, subcommands provide further information by means of the --help flag. For example leverage tf --help.

    "},{"location":"user-guide/leverage-cli/basic-features/#global-options","title":"Global options","text":""},{"location":"user-guide/leverage-cli/history/","title":"A bit of history","text":""},{"location":"user-guide/leverage-cli/history/#how-leverage-cli-came-about","title":"How Leverage CLI came about","text":"

    The multiple tools and technologies required to work with a Leverage project were initially handled through a Makefiles system. Not only to automate and simplify the different tasks, but also to provide a uniform user experience during the management of a project.

    As a result of more and more features being added and the Leverage Reference Architecture becoming broader and broader, our Makefiles were growing large and becoming too repetitive, and thus, harder to maintain. Also, some limitations and the desire for a more friendly and flexible language than that of Makefiles made evident the need for a new tool to take their place.

Python, a language broadly adopted for automation due to its flexibility and very gentle learning curve, seemed ideal. Even more so, Pynt, a package that provides the ability to define and manage tasks as simple Python functions, satisfied most of our requirements and thus was selected for the job. Some gaps still remained, but with minor modifications these were bridged.

Gradually, all capabilities originally implemented through Makefiles were migrated to Python as libraries of tasks that still resided within the Leverage Reference Architecture. But soon, the need to deliver these capabilities pre-packaged in a tool instead of embedded in the infrastructure definition became apparent, and they were re-implemented as built-in commands of Leverage CLI.

Currently, the core functionality needed to interact with a Leverage project is native to Leverage CLI, but a system for custom task definition and execution, heavily inspired by that of Pynt, is retained.

    "},{"location":"user-guide/leverage-cli/installation/","title":"Installation","text":"

To use Leverage CLI you need to install it from the Python Package Index (PyPI). Currently, Linux and macOS are the only supported operating systems.

    Requirements

    Having issues with the CLI?

    Check out the troubleshooting section to find more help.

    "},{"location":"user-guide/leverage-cli/installation/#install-pip","title":"Install Pip","text":"Ubuntu/DebianCentOS/RHELFedoraMacOS
    $ sudo apt install python3-pip\n
    $ sudo yum install python3-pip\n
    $ sudo dnf install python3-pip\n

    Pip should already be installed alongside your Python 3 installation. If for whatever reason this is not the case:

    $ brew install python3\n

    "},{"location":"user-guide/leverage-cli/installation/#install-leverage-cli","title":"Install Leverage CLI","text":"
    $ pip3 install leverage\n
    "},{"location":"user-guide/leverage-cli/installation/#update-leverage-cli-from-previous-versions","title":"Update Leverage CLI from previous versions","text":"

    Upgrade to a specific version.

    $ pip3 install -Iv leverage==1.9.1\n

    Upgrade to the latest stable version

    $ pip3 install --upgrade leverage\n

    "},{"location":"user-guide/leverage-cli/installation/#verify-your-leverage-installation","title":"Verify your Leverage installation","text":"

    Verify that your Leverage installation was successful by running

    $ leverage --help\nUsage: leverage [OPTIONS] COMMAND [ARGS]...\n\n  Leverage Reference Architecture projects command-line tool.\n\nOptions:\n  -f, --filename TEXT  Name of the build file containing the tasks\n                       definitions.  [default: build.py]\n-l, --list-tasks     List available tasks to run.\n  -v, --verbose        Increase output verbosity.\n  --version            Show the version and exit.\n  --help               Show this message and exit.\n\nCommands:\n  aws          Run AWS CLI commands in a custom containerized environment.\n  credentials  Manage AWS cli credentials.\n  kubectl      Run Kubectl commands in a custom containerized environment.\n  project      Manage a Leverage project.\n  run          Perform specified task(s) and all of its dependencies.\n  terraform    Run Terraform commands in a custom containerized...\n  tf           Run Terraform commands in a custom containerized...\n  tfautomv     Run TFAutomv commands in a custom containerized...\n

    "},{"location":"user-guide/leverage-cli/installation/#installation-in-an-isolated-environment","title":"Installation in an isolated environment","text":"

    If you prefer not to install the Leverage package globally and would like to limit its influence to only the directory of your project, we recommend using tools like Pipenv or Poetry. These tools are commonly used when working with python applications and help manage common issues that may result from installing and using such applications globally.
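
For example, a minimal sketch using Pipenv (assuming Pipenv is already installed; the project directory name is just a placeholder):

cd my-leverage-project\npipenv install leverage\npipenv run leverage --help\n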

    "},{"location":"user-guide/leverage-cli/installation/#shell-completion","title":"Shell completion","text":"

    To enable autocompletion for Leverage in your shell, do the following:

    BashZshFish

    Add to ~/.bashrc:

    eval \"$(_LEVERAGE_COMPLETE=bash_source leverage)\"\n

    Add to ~/.zshrc:

    eval \"$(_LEVERAGE_COMPLETE=zsh_source leverage)\"\n

    Add to ~/.config/fish/completions/leverage.fish:

    eval (env _LEVERAGE_COMPLETE=fish_source leverage)\n

    Or to avoid invoking eval every time a shell starts:

    BashZshFish

    Save the script:

    _LEVERAGE_COMPLETE=bash_source leverage > ~/.leverage-complete.bash\n
    Source the script in ~/.bashrc:
    . ~/.leverage-complete.bash\n

    Save the script:

    _LEVERAGE_COMPLETE=zsh_source leverage > ~/.leverage-complete.zsh\n
    Source the script in ~/.zshrc:
    . ~/.leverage-complete.zsh\n

    Save the script to ~/.config/fish/completions/leverage.fish:

    _LEVERAGE_COMPLETE=fish_source leverage > ~/.config/fish/completions/leverage.fish\n

    Start a new shell in order to load any changes made to the shell config.

    "},{"location":"user-guide/leverage-cli/overview/","title":"Overview","text":""},{"location":"user-guide/leverage-cli/overview/#leverage-cli","title":"Leverage CLI","text":""},{"location":"user-guide/leverage-cli/overview/#overview","title":"Overview","text":"

    Leverage CLI is the tool used to manage and interact with any Leverage project.

It transparently handles the most complex and error-prone tasks that arise from working with a state-of-the-art infrastructure definition like our Leverage Reference Architecture. Leverage CLI uses a dockerized approach to encapsulate the tools needed to perform such tasks and to free the user from having to deal with the configuration and management of said tools.

    "},{"location":"user-guide/leverage-cli/overview/#repositories","title":"Repositories","text":""},{"location":"user-guide/leverage-cli/private-repositories/","title":"Private Repositories","text":""},{"location":"user-guide/leverage-cli/private-repositories/#working-with-terraform-modules-in-private-repos","title":"Working with Terraform modules in private repos","text":"

If the layer is using a module from a private repository, read the following. E.g.:

    module \"themodule\" {\nsource = \"git@gitlab.com:some-org/some-project/the-private-repo.git//modules/the-module?ref=v0.0.1\"\n...\n}\n
    where gitlab.com:some-org/some-project/the-private-repo.git is a private repo.

    "},{"location":"user-guide/leverage-cli/private-repositories/#ssh-accessed-repository","title":"SSH accessed repository","text":"

To source a Terraform module from a private repository in a layer via an SSH connection, these considerations have to be kept in mind.

Leverage CLI will mount the host's SSH-Agent socket into the Leverage Toolbox container; this way your keys are accessed in a secure way.

So, if a private repository has to be accessed over SSH, the corresponding keys need to be loaded into the SSH-Agent.

If the agent is automatically started and the needed keys are added in the host system, it should work as is.

Otherwise, these steps should be followed (see the sketch below):
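
They typically amount to something like the following sketch (the key path is just an example):

eval \"$(ssh-agent -s)\"\nssh-add ~/.ssh/my_gitlab_key\n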

    "},{"location":"user-guide/leverage-cli/private-repositories/#using-the-ssh-config-file-to-specify-the-key-that-must-be-used-for-a-given-host","title":"Using the SSH config file to specify the key that must be used for a given host","text":"

The ssh-agent socket is not always available on every OS (e.g. on macOS). So our leverage terraform init command copies the SSH config file (and the whole .ssh directory) into the container volume, which means any custom configuration you have there will be used. You can read more in the official SSH documentation.

If, for example, you need to use a custom key for your private repositories on GitLab, you could add a block to your SSH config file specifying:

    host gitlab.com\n HostName gitlab.com\n IdentityFile ~/.ssh/my_gitlab_key\n
    "},{"location":"user-guide/leverage-cli/shell/","title":"Getting Shell Access","text":""},{"location":"user-guide/leverage-cli/shell/#the-shell-environment","title":"The shell environment","text":"

    When launching a Terraform shell, Leverage provides the user with a completely isolated environment tailored to operate in the current project via a Docker container.

The whole project is mounted on a directory named after the value for project_long in the global configuration file, or simply named \"project\" if this value is not defined. A project named myexample would be mounted in /myexample.

The user's .gitconfig file is also mounted on /etc/gitconfig for convenience, while (if ssh-agent is running) the socket stated in SSH_AUTH_SOCK is mounted on /ssh-agent. Also, the credentials files (credentials and config) found in the project AWS credentials directory (~/.aws/myexample) are mapped to the locations given by the environment variables AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE respectively within the container.

    "},{"location":"user-guide/leverage-cli/shell/#authentication","title":"Authentication","text":"

Determining which credentials are needed to operate on a layer, and retrieving those credentials, may prove cumbersome for many complex layer definitions. In addition to that, correctly configuring them can also become a tedious and error-prone process. For that reason, Leverage automates this process upon launching the shell, if requested by the user via the shell command options.

Bear in mind that an authenticated shell session's credentials are obtained for the layer in which the session was launched. These credentials may not be valid for other layers that require assuming different roles or more permissions.

    "},{"location":"user-guide/leverage-cli/shell/#multi-factor-authentication","title":"Multi-Factor authentication","text":"
    leverage terraform shell --mfa\n

    If MFA authentication is required, Leverage will prompt the user for the required tokens for the layer or use the cached credentials if still valid.

    The user's programmatic keys must be configured beforehand via leverage credentials configure command.

    "},{"location":"user-guide/leverage-cli/shell/#single-sign-on","title":"Single-Sign On","text":"

If authentication via SSO is required, the user will need to configure or log in to SSO before launching the shell via

    leverage terraform shell --sso\n
    "},{"location":"user-guide/leverage-cli/shell/#operations-on-the-projects-layer","title":"Operations on the project's layer","text":"

    In order to operate in a project's layer, Terraform commands such as plan or apply will need to receive extra parameters providing the location of the files that contain the definition of the variables required by the layer. Usually, these files are:

    In this case these parameters should take the form:

-var-file=/myexample/config/common.tfvars -var-file=/myexample/account/config/account.tfvars -var-file=/myexample/account/config/backend.tfvars\n

    Relative paths can prove useful when providing these locations. A layer definition may require more than just these files.

    So, for example, to apply changes on a standard Leverage Reference Architecture layer, the complete command would be:

    terraform apply -var-file=../../../config/common.tfvars -var-file=../../config/account.tfvars -var-file=../../config/backend.tfvars\n
    However, when initializing Terraform different parameters are needed, so it should be run as:
    terraform init -backend-config=../../config/backend.tfvars\n

    "},{"location":"user-guide/leverage-cli/extending-leverage/build.env/","title":"The build.env file","text":""},{"location":"user-guide/leverage-cli/extending-leverage/build.env/#override-defaults-via-buildenv-file","title":"Override defaults via build.env file","text":"

By utilizing the build.env capability, you can easily change some default behaviors of the CLI. In the binbash Leverage\u2122 Ref Architecture you will find the following build.env file as an example. This allows you to specify several configurations for the CLI, such as the Leverage-Toolbox-Image you want to use, ensuring that you are using the latest version or a specific version you prefer based on your compatibility requirements. This helps you avoid compatibility issues and ensures that your infrastructure deployments go smoothly.

    "},{"location":"user-guide/leverage-cli/extending-leverage/build.env/#buildenv-file-format","title":"build.env file format","text":"

    The build.env file format and supported parameters are the following:

    # Project settings\nPROJECT=bb\n\n# General\nMFA_ENABLED=false\n\n# Terraform\nTERRAFORM_IMAGE_TAG=1.2.7-0.1.4\n

    Customizing or extending the leverage-toolbox docker image

You can locally copy and edit the Dockerfile in order to rebuild it based on your needs, e.g. for a Dockerfile placed in the current working directory: $ docker build -t binbash/leverage-toolbox:1.2.7-0.1.4 --build-arg TERRAFORM_VERSION='1.2.7' . In case you would like these changes to be permanent, please consider creating and submitting a PR.

    "},{"location":"user-guide/leverage-cli/extending-leverage/build.env/#working-principle-multiple-buildenv-precedence","title":"Working principle & multiple build.env precedence","text":"

The leverage CLI has an environment variable loading utility that will load all .env files with the given name in the current directory and all of its parents up to the repository root directory, and store them in a dictionary. Files are traversed from parent to child so as to allow values in deeper directories to override possible previously existing values. Consider that all files must bear the same name, which in our case defaults to \"build.env\". So you can have multiple build.env files that will be processed by the leverage CLI in the context of a specific layer of a Reference Architecture project. For example, the /le-tf-infra-aws/apps-devstg/us-east-1/k8s-kind/k8s-resources/build.env file.
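
As an illustrative sketch of this precedence (paths and values are only examples taken from elsewhere in this documentation), a deeper file overrides the value set closer to the repository root:

le-tf-infra-aws/build.env                                               # TERRAFORM_IMAGE_TAG=1.2.7-0.1.4\nle-tf-infra-aws/apps-devstg/us-east-1/k8s-kind/k8s-resources/build.env  # TERRAFORM_IMAGE_TAG=1.3.5-0.1.7 (this value wins inside that layer)\n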

    "},{"location":"user-guide/leverage-cli/extending-leverage/how-to-extend/","title":"Extending & Configuring leverage CLI","text":""},{"location":"user-guide/leverage-cli/extending-leverage/how-to-extend/#override-defaults-via-buildenv-file","title":"Override defaults via build.env file","text":"

    By utilizing the build.env capability, you can easily change some default behaviors of the CLI. This allows you to specify several configurations for the CLI, such as the Leverage-Toolbox-Image that you want to use, ensuring that you are using the latest version or a specific version that you prefer based on your compatibility requirements. This helps you avoid compatibility issues and ensures that your infrastructure deployments go smoothly.

    Read More about build.env

    In order to further understand this mechanism and how to use it please visit the dedicated build.env entry.

    "},{"location":"user-guide/leverage-cli/extending-leverage/how-to-extend/#tfvars-config-files","title":".tfvars config files","text":"

    Using additional .tfvars configuration files at the account level or at the global level will allow you to extend your terraform configuration entries. Consider that using multiple .tfvars configuration files allows you to keep your configuration entries well-organized. You can have separate files for different accounts or environments, making it easy to manage and maintain your infrastructure. This also makes it easier for other team members to understand and work with your configuration, reducing the risk of misconfigurations or errors.

    Read More about .tfvars config files

    In order to further understand this mechanism and how to use it please visit the dedicated .tfvars configs entry.

    "},{"location":"user-guide/leverage-cli/extending-leverage/how-to-extend/#custom-tasks-with-buildpy","title":"Custom tasks with build.py","text":"

    Leverage CLI has a native mechanism to allow customizing your workflow. With the custom tasks feature using build.py, you can write your own tasks using Python, tailoring the CLI to fit your specific workflow. This allows you to automate and streamline your infrastructure deployments, reducing the time and effort required to manage your infrastructure. You can also easily integrate other tools and services into your workflow to further improve your productivity.

    Read More about build.py custom tasks

    In order to further understand this mechanism and how to use it please visit the dedicated build.py custom tasks entry.

    "},{"location":"user-guide/leverage-cli/extending-leverage/how-to-extend/#fork-collaborate-and-improve","title":"Fork, collaborate and improve","text":"

    By forking the leverage repository on GitHub and contributing to the project, you have the opportunity to make a positive impact on the product and the community. You can fix bugs, implement new features, and contribute your ideas and feedback. This helps to ensure that the product continues to evolve and improve, serving the needs of the community and making infrastructure deployments easier for everyone.

    Read More about contributing with the project

    In order to further understand this mechanism and how to use it please visit the dedicated CONTRIBUTING.md entry.

    "},{"location":"user-guide/leverage-cli/extending-leverage/tasks/","title":"Custom tasks","text":"

Just as we needed to automate or simplify certain tasks or jobs for the user, you may need to do the same in your project.

    Leverage CLI does not limit itself to provide only the core functionality required to create and manage your Leverage project, but also allows for the definition of custom tasks, at the build.py root context file, that can be used to add capabilities that are outside of Leverage CLI's scope.

    By implementing new auxiliary Leverage tasks you can achieve consistency and homogeneity in the experience of the user when interacting with your Leverage project and simplify the usage of any other tool that you may require.

To check some of the included common tasks, please see here.

    "},{"location":"user-guide/leverage-cli/extending-leverage/tasks/#tasks","title":"Tasks","text":"

    Tasks are simple python functions that are marked as such with the use of the @task() decorator. We call the file where all tasks are defined a 'build script', and by default it is assumed to be named build.py. If you use any other name for your build script, you can let Leverage know through the global option --filename.

    from leverage import task\n\n@task()\ndef copy_file(src, dst):\n\"\"\"Copy src file to dst\"\"\"\n    print(f\"Copying {src} to {dst}\")\n

The contents of the task's docstring are used to provide a short description of the task's purpose when listing all available tasks to run.

    $ leverage --list-tasks\nTasks in build file `build.py`:\n\n  copy_file               Copy src file to dst\n\nPowered by Leverage 1.0.10\n

Any arguments that the task may receive are to be given when running the task. The syntax for passing arguments is similar to that of Rake.

    $ leverage run copy_file[\"/path/to/foo\",\"/path/to/bar\"]\n[09:25:59.002] [ build.py - \u279c Starting task copy_file]\nCopying /path/to/foo to /path/to/bar\n[09:25:59.005] [ build.py - \u2714 Completed task copy_file ]\n

Keyword arguments are also supported.

    $ leverage run copy_file[\"/path/to/foo\",dst=\"/path/to/bar\"]\n
    "},{"location":"user-guide/leverage-cli/extending-leverage/tasks/#dependencies","title":"Dependencies","text":"

    The task decorator allows for the definition of dependencies. These are defined as positional arguments in the decorator itself. Multiple dependencies can be defined for each task.

    from leverage import task\n@task()\ndef html(target=\".\"):\n\"\"\"Generate HTML.\"\"\"\n    print(f\"Generating HTML in directory \\\"{target}\\\"\")\n\n@task()\ndef images():\n\"\"\"Prepare images.\"\"\"\n    print(\"Preparing images...\")\n\n@task(html, images)\ndef start_server(host=\"localhost\", port=\"80\"):\n\"\"\"Start the server\"\"\"\n    print(f\"Starting server at {host}:{port}\")\n

    We can see how the task start_server depends on both html and images. This means that both html and images will be executed before start_server and in that same order.

    $ leverage run start_server\n[09:34:54.848] [ build.py - \u279c Starting task html ]\nGenerating HTML in directory \".\"\n[09:34:54.851] [ build.py - \u2714 Completed task html ]\n[09:34:54.852] [ build.py - \u279c Starting task images ]\nPreparing images...\n[09:34:54.854] [ build.py - \u2714 Completed task images ]\n[09:34:54.855] [ build.py - \u279c Starting task start_server ]\nStarting server at localhost:80\n[09:34:54.856] [ build.py - \u2714 Completed task start_server ]\n
    "},{"location":"user-guide/leverage-cli/extending-leverage/tasks/#ignoring-a-task","title":"Ignoring a task","text":"

If you find yourself in a situation where there's a task that many other tasks depend on, and you need to quickly remove it from the dependency chains of all those tasks, ignoring its execution is a very simple way to achieve that without having to remove all definitions and references across the code.

    To ignore or disable a task, simply set ignore to True in the task's decorator.

    from leverage import task\n\n@task()\ndef html(target=\".\"):\n\"\"\"Generate HTML.\"\"\"\n    print(f\"Generating HTML in directory \\\"{target}\\\"\")\n\n@task(ignore=True)\ndef images():\n\"\"\"Prepare images.\"\"\"\n    print(\"Preparing images...\")\n\n@task(html, images)\ndef start_server(server=\"localhost\", port=\"80\"):\n\"\"\"Start the server\"\"\"\n    print(f\"Starting server at {server}:{port}\")\n
    $ leverage run start_server\n[09:38:32.819] [ build.py - \u279c Starting task html ]\nGenerating HTML in directory \".\"\n[09:38:32.822] [ build.py - \u2714 Completed task html ]\n[09:38:32.823] [ build.py - \u2933 Ignoring task images ]\n[09:38:32.824] [ build.py - \u279c Starting task start_server ]\nStarting server at localhost:80\n[09:38:32.825] [ build.py - \u2714 Completed task start_server ]\n

    When listing the available tasks any ignored task will be marked as such.

    $ leverage --list-tasks\nTasks in build file `build.py`:\n\n  html                      Generate HTML.\n  images        [Ignored]   Prepare images.\n  start_server              Start the server\n\nPowered by Leverage 1.0.10\n
    "},{"location":"user-guide/leverage-cli/extending-leverage/tasks/#private-tasks","title":"Private tasks","text":"

Sometimes you may want to define auxiliary tasks that don't need to be shown as available to run by the user. For this scenario, you can make any task into a private one. There are two ways to accomplish this: either by naming the task with an initial underscore (_) or by setting private to True in the task's decorator.

    from leverage import task\n\n@task(private=True)\ndef clean():\n\"\"\"Clean build directory.\"\"\"\n    print(\"Cleaning build directory...\")\n\n@task()\ndef _copy_resources():\n\"\"\"Copy resource files. This is a private task and will not be listed.\"\"\"\n    print(\"Copying resource files\")\n\n@task(clean, _copy_resources)\ndef html(target=\".\"):\n\"\"\"Generate HTML.\"\"\"\n    print(f\"Generating HTML in directory \\\"{target}\\\"\")\n\n@task(clean, _copy_resources, ignore=True)\ndef images():\n\"\"\"Prepare images.\"\"\"\n    print(\"Preparing images...\")\n\n@task(html, images)\ndef start_server(host=\"localhost\", port=\"80\"):\n\"\"\"Start the server\"\"\"\n    print(f\"Starting server at {host}:{port}\")\n

    Private tasks will be executed, but not shown when tasks are listed.

    $ leverage run start_server\n[09:40:33.535] [ build.py - \u279c Starting task clean ]\nCleaning build directory...\n[09:40:33.540] [ build.py - \u2714 Completed task clean ]\n[ build.py - \u279c Starting task _copy_resources ]\nCopying resource files\n[09:40:33.541] [ build.py - \u2714 Completed task _copy_resources ]\n[09:40:33.542] [ build.py - \u279c Starting task html ]\nGenerating HTML in directory \".\"\n[09:40:33.543] [ build.py - \u2714 Completed task html ]\n[09:40:33.544] [ build.py - \u279c Starting task images ]\nPreparing images...\n               [ build.py - \u2714 Completed task images ]\n[09:40:33.545] [ build.py - \u279c Starting task start_server ]\nStarting server at localhost:80\n[09:40:33.546] [ build.py - \u2714 Completed task start_server ]\n
    $ leverage --list-tasks\nTasks in build file `build.py`:\n\n  html              Generate HTML.\n  images            Prepare images.\n  start_server      Start the server\n\nPowered by Leverage 1.0.10\n

    "},{"location":"user-guide/leverage-cli/extending-leverage/tasks/#default-task","title":"Default task","text":"

If you have a task that is run much more often than the rest, it can get tedious to always pass the name of that task to the run command. Leverage allows for the definition of a default task to address this situation. This task is executed when no task name is given.

    To define a default task, simply assign the already defined task to the special variable __DEFAULT__.

    from leverage import task\n\n@task()\ndef html(target=\".\"):\n\"\"\"Generate HTML.\"\"\"\n    print(f\"Generating HTML in directory \\\"{target}\\\"\")\n\n@task(ignore=True)\ndef images():\n\"\"\"Prepare images.\"\"\"\n    print(\"Preparing images...\")\n\n@task(html, images)\ndef start_server(server=\"localhost\", port=\"80\"):\n\"\"\"Start the server\"\"\"\n    print(f\"Starting server at {server}:{port}\")\n\n__DEFAULT__ = start_server\n

    The default task is marked as such when listing all available tasks.

    $ leverage --list-tasks\nTasks in build file `build.py`:\n\n  html                      Generate HTML.\n  images        [Ignored]   Prepare images.\n  start_server  [Default]   Start the server\n\nPowered by Leverage 1.0.10\n
    "},{"location":"user-guide/leverage-cli/extending-leverage/tasks/#build-scripts-lookup","title":"Build scripts lookup","text":"

Build scripts are not only looked up in the current directory but also in all parent directories up to the root of the Leverage project. This makes it possible to launch tasks from any directory of the project as long as a parent of the current directory holds a build script.

    "},{"location":"user-guide/leverage-cli/extending-leverage/tasks/#organizing-build-scripts","title":"Organizing build scripts","text":"

    Leverage CLI treats the directory in which the build script is found as a python package. This means that you can break up your build files into modules and simply import them into your main build script, encouraging modularity and code reuse.

    Leverage CLI empowers you to create whole libraries of functionalities for your project. You can use it to better organize your tasks or implement simple auxiliary python functions.

    This way, given the following folder structure:

    leverage_project\n\u251c\u2500\u2500 build.py\n\u251c\u2500\u2500 deployment_tasks.py\n\u251c\u2500\u2500 testing_tasks.py\n\u2514\u2500\u2500 auxiliary_library\n    \u251c\u2500\u2500 reports.py\n    \u2514\u2500\u2500 utils.py\n

    The build script build.py can make use of definitions in the other files by means of importing them.

    from .deployment_tasks import *\nfrom .testing_tasks import unit_tests, functional_tests\nfrom .auxiliary_library.reports import coverage_report\nfrom .auxiliary_library.utils import format_as_table\n

    Importing user defined modules

    All import statements to user defined modules need to be relative to the main build script in order to function correctly.

    "},{"location":"user-guide/leverage-cli/extending-leverage/tasks/#known-issues","title":"Known issues","text":""},{"location":"user-guide/leverage-cli/extending-leverage/tasks/#zsh-glob-patterns-zsh-no-matches-found","title":"Zsh Glob Patterns: zsh: no matches found","text":"

If you use zsh as your shell you might get an error like this one: zsh: no matches found: start_server[port=8000]

The problem has to do with the square brackets, as zsh has glob patterns enabled by default, which causes every such input to be interpreted as a pattern.

There are a few workarounds:

    1. Escape the square brackets: leverage run start_server\\[port=8000\\]

    2. Enclose the entire task between double quotes: leverage run \"start_server[port=8000]\"

    3. Disable glob patterns: noglob leverage run start_server[port=8000]

    An improvement over the last point is to create an alias for the leverage command: alias leverage='noglob leverage'

    "},{"location":"user-guide/leverage-cli/extending-leverage/tasks/#folder-names-containing-periods","title":"Folder names containing periods","text":"

    As mentioned in the Organizing build scripts section, Leverage CLI treats the directory in which the main build script is located as a python package in order to allow importing of user defined python modules. If this directory contains a period (.) in its name, this will create issues for the importing process. This is because the period is used by python to separate subpackages from their parents.

    For example, if the directory where the build script build.py is stored is named local.assets, at the time of loading the build script, python will try to locate local.build instead of locating local.assets.build and fail.

The same situation will arise from any other subdirectory in the project. When importing modules from those directories, they won't be found.

    The simple solution to this is to avoid using periods when naming directories. If the build script is located in the project's root folder, this would also apply to that directory.

    "},{"location":"user-guide/leverage-cli/extending-leverage/tasks/#common-tasks","title":"Common tasks","text":"

    These are the common tasks included with binbash Leverage:

    "},{"location":"user-guide/leverage-cli/extending-leverage/tasks/#layer-dependency-check","title":"Layer Dependency Check","text":"

This task is aimed at helping you determine the current layer's dependencies.

If the current layer is getting information from remote states in different layers, then those layers have to be run before the current layer; this is called a dependency.

    To run this task, cd into the desired layer and run:

    leverage run layer_dependency\n

    This is a sample output:

    \u276f leverage run layer_dependency\n[10:37:41.817] [ build.py - \u279c Starting task _checkdir ]                                          [10:37:41.824] [ build.py - \u2714 Completed task _checkdir ]                                         [10:37:41.825] [ build.py - \u279c Starting task layer_dependency ]                                   \nNote layer dependency is calculated using remote states.\nNevertheless, other sort of dependencies could exist without this kind of resources,\ne.g. if you rely on some resource created in a different layer and not referenced here.\n{\n\"security\": {\n\"remote_state_name\": \"security\",\n  \"account\": \"apps-devstg\",\n  \"layer\": \"security-keys\",\n  \"key\": \"apps-devstg/security-keys/terraform.tfstate\",\n  \"key_raw\": \"${var.environment}/security-keys/terraform.tfstate\",\n  \"usage\": {\n\"used\": true,\n   \"files\": [\n\"/home/jdelacamara/Dev/work/BinBash/code/le-tf-infra-aws/apps-devstg/us-east-1/ec2-fleet-ansible --/ec2_fleet.tf\"\n]\n}\n},\n \"vpc\": {\n\"remote_state_name\": \"vpc\",\n  \"account\": \"apps-devstg\",\n  \"layer\": \"network\",\n  \"key\": \"apps-devstg/network/terraform.tfstate\",\n  \"key_raw\": \"${var.environment}/network/terraform.tfstate\",\n  \"usage\": {\n\"used\": true,\n   \"files\": [\n\"/home/jdelacamara/Dev/work/BinBash/code/le-tf-infra-aws/apps-devstg/us-east-1/ec2-fleet-ansible --/locals.tf\",\n    \"/home/jdelacamara/Dev/work/BinBash/code/le-tf-infra-aws/apps-devstg/us-east-1/ec2-fleet-ansible --/ec2_fleet.tf\"\n]\n}\n},\n \"vpc-shared\": {\n\"remote_state_name\": \"vpc-shared\",\n  \"account\": \"shared\",\n  \"layer\": \"network\",\n  \"key\": \"shared/network/terraform.tfstate\",\n  \"key_raw\": \"shared/network/terraform.tfstate\",\n  \"usage\": {\n\"used\": true,\n   \"files\": [\n\"/home/jdelacamara/Dev/work/BinBash/code/le-tf-infra-aws/apps-devstg/us-east-1/ec2-fleet-ansible --/ec2_fleet.tf\"\n]\n}\n}\n}\n[10:37:41.943] [ build.py - \u2714 Completed task layer_dependency ]\n

    Data:

    For a shorter version:

    \u276f leverage run layer_dependency\\['summary=True'\\]\n[10:47:00.461] [ build.py - \u279c Starting task _checkdir ]                                          [10:47:00.467] [ build.py - \u2714 Completed task _checkdir ]                                         [ build.py - \u279c Starting task layer_dependency ]                                   \nNote layer dependency is calculated using remote states.\nNevertheless, other sort of dependencies could exist without this kind of resources,\ne.g. if you rely on some resource created in a different layer and not referenced here.\n{\n\"this\": [\n\"apps-devstg/security-keys/terraform.tfstate\",\n  \"apps-devstg/network/terraform.tfstate\",\n  \"shared/network/terraform.tfstate\"\n]\n}\n[10:47:00.489] [ build.py - \u2714 Completed task layer_dependency ]  

    If you already have a binbash Leverage project created, you can download this file into your project root dir and add this import to your build.py:

    from build_deplayerchk import *\n
    "},{"location":"user-guide/leverage-cli/reference/aws/","title":"Command: aws","text":"

    The aws command is a wrapper for a containerized installation of AWS CLI 2.0. All commands are passed directly to the AWS CLI and you should expect the same behavior from all of them, except for the few exceptions listed below.

    "},{"location":"user-guide/leverage-cli/reference/aws/#configure-sso","title":"configure sso","text":""},{"location":"user-guide/leverage-cli/reference/aws/#usage","title":"Usage","text":"
    leverage aws configure sso\n

    Extracts information from the project's Terraform configuration to generate the required profiles for AWS CLI to handle SSO.

    In the process, you will need to log in via your identity provider. To allow you to do this, Leverage will attempt to open the login page in the system's default browser.

    "},{"location":"user-guide/leverage-cli/reference/aws/#sso-login","title":"sso login","text":""},{"location":"user-guide/leverage-cli/reference/aws/#usage_1","title":"Usage","text":"
    leverage aws sso login\n

It wraps aws sso login, taking extra steps to allow Leverage to use the resulting token while it is valid.

    "},{"location":"user-guide/leverage-cli/reference/aws/#sso-logout","title":"sso logout","text":""},{"location":"user-guide/leverage-cli/reference/aws/#usage_2","title":"Usage","text":"
    leverage aws sso logout

It wraps aws sso logout, taking extra steps to make sure that all tokens and temporary credentials are wiped from the system. It also reminds the user to log out from the AWS SSO login page and identity provider portal. This last action is left to the user to perform.

    Important

Please keep in mind that this command will not only remove temporary credentials but also the AWS config file. If you use such a file to store your own configuration, please create a backup before running the sso logout command.

    "},{"location":"user-guide/leverage-cli/reference/credentials/","title":"Command: credentials","text":"

    The credentials command is used to set up and manage the AWS CLI credentials required to interact with the AWS environment.

All credentials subcommands feed off the project.yaml, build.env, and Terraform configuration files to obtain the information they need. If the basic required information is not found, the subcommands will prompt the user for it.

    "},{"location":"user-guide/leverage-cli/reference/credentials/#configure","title":"configure","text":""},{"location":"user-guide/leverage-cli/reference/credentials/#usage","title":"Usage","text":"
    leverage credentials configure --type [BOOTSTRAP|MANAGEMENT|SECURITY] [options]\n

    The credentials configure command sets up the credentials needed to interact with the AWS environment, from the initial deployment process (BOOTSTRAP) to everyday management (MANAGEMENT) and development or use (SECURITY) of it.

    It attempts to retrieve the structure of the organization in order to generate all the AWS CLI profiles required to interact with the environment and update the terraform configuration with the id of all relevant accounts.

    Backups of the previous configured credentials files are always created when overwriting or updating the current ones.

    "},{"location":"user-guide/leverage-cli/reference/credentials/#options","title":"Options","text":"

If neither --overwrite-existing-credentials nor --skip-access-keys-setup is given, the user will be prompted to choose between both actions when appropriate.
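
For example, a typical invocation to refresh the management credentials without being prompted about the existing ones could look like this (a sketch; adjust the type and flags to your case):

leverage credentials configure --type MANAGEMENT --overwrite-existing-credentials\n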

    "},{"location":"user-guide/leverage-cli/reference/kubectl/","title":"Command: kubectl","text":"

    Regarding Leverage Toolbox versions

    To have this feature available, Leverage Toolbox versions 1.2.7-0.1.7 and up, or 1.3.5-0.1.7 and up must be used.

    The kubectl command is a wrapper for a containerized installation of kubectl. It provides the kubectl executable with specific configuration values required by Leverage.

    It transparently handles authentication, whether it is Multi-Factor or via Single Sign-On, on behalf of the user in the commands that require it. SSO Authentication takes precedence over MFA when both are active.

    The subcommands can only be run at layer level and will not run anywhere else in the project. The configure subcommand can only be run at an EKS cluster layer, usually called cluster.

    The command can also be invoked via its shortened version kc.

    Configuring on first use

    To start using this command, you must first run leverage kubectl configure on a cluster layer to set up the credentials in the proper config file.
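
    Example (the cluster layer path below is hypothetical and depends on your project's layout):

    # Move into the EKS cluster layer and configure kubectl\ncd shared/us-east-1/k8s-eks/cluster\nleverage kubectl configure\n\n# From then on, kubectl commands (or the kc alias) can be run from that layer\nleverage kc get nodes\n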

    "},{"location":"user-guide/leverage-cli/reference/kubectl/#run","title":"run","text":""},{"location":"user-guide/leverage-cli/reference/kubectl/#usage","title":"Usage","text":"
    leverage kubectl [commands] [arguments]\n

    Equivalent to kubectl.

    All arguments given are passed as received to kubectl.

    Example:

    leverage kubectl get pods --namespace monitoring\n
    "},{"location":"user-guide/leverage-cli/reference/kubectl/#shell","title":"shell","text":""},{"location":"user-guide/leverage-cli/reference/kubectl/#usage_1","title":"Usage","text":"
    leverage kubectl shell\n

    Open a shell into the Kubectl container in the current directory.

    "},{"location":"user-guide/leverage-cli/reference/kubectl/#configure","title":"configure","text":""},{"location":"user-guide/leverage-cli/reference/kubectl/#usage_2","title":"Usage","text":"
    leverage kubectl configure\n

    Add the cluster from the EKS layer into your kubectl config file. Equivalent to aws eks update-kubeconfig ....

    "},{"location":"user-guide/leverage-cli/reference/project/","title":"Command: project","text":"

    The project command is used to execute global operations on the project.

    "},{"location":"user-guide/leverage-cli/reference/project/#init","title":"init","text":""},{"location":"user-guide/leverage-cli/reference/project/#usage","title":"Usage","text":"
    leverage project init\n

    The project init subcommand initializes a Leverage project in the current directory. If not already present, it also initializes the global config directory for the Leverage CLI, ~/.leverage/, and fetches the template used for project creation.

    It then proceeds to drop a template file for the project configuration called project.yaml and initializes a git repository in the directory.

    "},{"location":"user-guide/leverage-cli/reference/project/#create","title":"create","text":""},{"location":"user-guide/leverage-cli/reference/project/#usage_1","title":"Usage","text":"
    leverage project create\n

    The project create subcommand creates the files structure for the architecture in the current directory and configures it based on the values set in the project.yaml file.

    It will then proceed to make sure all files follow the standard Terraform code style.
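
    Putting both subcommands together, bootstrapping a new project could look like this (the directory name is just an example):

    mkdir myproject && cd myproject\n\n# Drop the project.yaml template and initialize a git repository\nleverage project init\n\n# Edit project.yaml to match your organization, then generate the files structure\nleverage project create\n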

    "},{"location":"user-guide/leverage-cli/reference/run/","title":"Command: run","text":"

    The run command is used to execute user defined tasks and all of their dependencies.

    Custom tasks documentation

    "},{"location":"user-guide/leverage-cli/reference/run/#usage","title":"Usage","text":"
    leverage run [tasks]\n

    An arbitrary number of tasks can be given to the command. Each task must be given as its name, optionally followed by any arguments the task requires enclosed in square brackets, i.e. TASK_NAME[TASK_ARGUMENTS]. Tasks are executed in the order in which they were provided.

    If no tasks are given, the default task will be executed. In case no default task is defined, the command will list all available tasks to run.

    Example:

    leverage run task1 task2[arg1,arg2] task3[arg1,kwarg1=val1,kwarg2=val2]\n

    "},{"location":"user-guide/leverage-cli/reference/shell/","title":"Command: shell","text":"

    Run a shell in a generic container. It supports mounting local paths and injecting arbitrary environment variables. It also supports AWS credentials injection via mfa/sso.

    >> leverage shell --help\n\nUsage: leverage shell [OPTIONS]\n\nRun a shell in a generic container. It supports mounting local paths and\n  injecting arbitrary environment variables. It also supports AWS credentials\n  injection via mfa/sso.\n\n  Syntax: leverage shell --mount <local-path> <container-path> --env-var <name> <value>\n  Example: leverage shell --mount /home/user/bin/ /usr/bin/ --env-var env dev\n\n  Both mount and env-var parameters can be provided multiple times.\n  Example: leverage shell --mount /home/user/bin/ /usr/bin/ --mount /etc/config.ini /etc/config.ini --env-var init 5 --env-var env dev\n\nOptions:\n  --mount <TEXT TEXT>...\n  --env-var <TEXT TEXT>...\n  --mfa                     Enable Multi Factor Authentication upon launching shell.\n  --sso                     Enable SSO Authentication upon launching shell.\n  --help                    Show this message and exit.\n
    "},{"location":"user-guide/leverage-cli/reference/terraform/","title":"Command: terraform | tf","text":"

    The terraform command is a wrapper for a containerized installation of Terraform. It provides the Terraform executable with specific configuration values required by Leverage.

    It transparently manages authentication, either Multi-Factor or Single Sign-On, on behalf of the user on commands that require it. SSO authentication takes precedence over MFA when both are active.

    Some commands can only be run at layer level and will not run anywhere else in the project.

    The command can also be invoked via its shortened version tf.

    Since version 1.12, all subcommands support the --mount and --env-var parameters in the form of tuples:

    leverage terraform --mount /home/user/bin/ /usr/bin/ --env-var FOO BAR apply\n

    You can also provide them multiple times:

    leverage terraform --mount /usr/bin/ /usr/bin/ --mount /etc/config /config --env-var FOO BAR --env-var TEST OK init\n

    "},{"location":"user-guide/leverage-cli/reference/terraform/#init","title":"init","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#usage","title":"Usage","text":"
    leverage terraform init [option] [arguments]\n

    Equivalent to terraform init.

    All arguments given are passed as received to Terraform.

    Can only be run at layer level if --layers is not set, or at account or layers-container-directory if it is.

    Layout validation is performed before actually initializing Terraform, unless explicitly disabled via the --skip-validation flag.
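
    Example:

    # Initialize the layer, running the layout validation first\nleverage terraform init\n\n# Initialize skipping the layout validation (e.g. when the backend bucket does not exist yet)\nleverage terraform init --skip-validation\n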

    "},{"location":"user-guide/leverage-cli/reference/terraform/#options","title":"Options","text":"

    Regarding S3 backend keys

    If the S3 backend block is set and no key was defined, the Leverage CLI will try to create a new one automatically and store it in the config.tf file. The key will be based on the layer path relative to the account.

    "},{"location":"user-guide/leverage-cli/reference/terraform/#plan","title":"plan","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#usage_1","title":"Usage","text":"
    leverage terraform plan [arguments]\n

    Equivalent to terraform plan.

    All arguments given are passed as received to Terraform.

    Can only be run at layer level if --layers is not set, or at account or layers-container-directory if it is.

    "},{"location":"user-guide/leverage-cli/reference/terraform/#options_1","title":"Options","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#apply","title":"apply","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#usage_2","title":"Usage","text":"
    leverage terraform apply [arguments]\n

    Equivalent to terraform apply.

    All arguments given are passed as received to Terraform.

    Can only be run at layer level if --layers is not set, or at account or layers-container-directory if it is.

    "},{"location":"user-guide/leverage-cli/reference/terraform/#options_2","title":"Options","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#destroy","title":"destroy","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#usage_3","title":"Usage","text":"
    leverage terraform destroy [arguments]\n

    Equivalent to terraform destroy.

    All arguments given are passed as received to Terraform.

    Can only be run at layer level if --layers is not set, or at account or layers-container-directory if it is.

    "},{"location":"user-guide/leverage-cli/reference/terraform/#options_3","title":"Options","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#output","title":"output","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#usage_4","title":"Usage","text":"
    leverage terraform output [arguments]\n

    Equivalent to terraform output.

    All arguments given are passed as received to Terraform.

    Can only be run at layer level if --layers is not set, or at account or layers-container-directory if it is.

    "},{"location":"user-guide/leverage-cli/reference/terraform/#options_4","title":"Options","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#version","title":"version","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#usage_5","title":"Usage","text":"
    leverage terraform version\n

    Equivalent to terraform version.

    Print Terraform version.

    "},{"location":"user-guide/leverage-cli/reference/terraform/#shell","title":"shell","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#usage_6","title":"Usage","text":"
    leverage terraform shell [option]\n

    Open a shell into the Terraform container in the current directory. An authenticated shell can only be opened at layer level.

    Terraform shell environment documentation

    "},{"location":"user-guide/leverage-cli/reference/terraform/#options_5","title":"Options","text":"

    Note: When --sso flag is used, the --mfa flag status is ignored.

    What if I want to run a Terraform command that is not supported by the CLI?

    One common error you could encounter is \"Error acquiring the state lock\", where you might need to use force-unlock. You can do the following:

    1. leverage terraform shell --sso.
    2. Then from inside the container: terraform force-unlock LOCK-ID.
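
    Put together (LOCK-ID is the lock ID reported by the error message):

    leverage terraform shell --sso\n\n# then, inside the container:\nterraform force-unlock LOCK-ID\n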
    "},{"location":"user-guide/leverage-cli/reference/terraform/#format","title":"format","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#usage_7","title":"Usage","text":"
    leverage terraform format [arguments]\n

    Equivalent to terraform fmt -recursive.

    Recursively format all files in the architecture to the Terraform code style.

    All arguments given are passed as received to Terraform.

    "},{"location":"user-guide/leverage-cli/reference/terraform/#validate","title":"validate","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#usage_8","title":"Usage","text":"
    leverage terraform validate\n

    Equivalent to terraform validate.

    Check the infrastructure definition's consistency.

    "},{"location":"user-guide/leverage-cli/reference/terraform/#validate-layout","title":"validate-layout","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#usage_9","title":"Usage","text":"
    leverage terraform validate-layout\n

    Check the Terraform backend configuration in the code definition.

    When you are setting up the backend layer for the very first time, the S3 bucket does not yet exist. When running validations, the Leverage CLI will detect that the S3 key does not exist or cannot be generated. Therefore, it is necessary to first create the S3 bucket by running init with the --skip-validation flag, and then move the \"tfstate\" file to it.

    Values checked:

    "},{"location":"user-guide/leverage-cli/reference/terraform/#import","title":"import","text":""},{"location":"user-guide/leverage-cli/reference/terraform/#usage_10","title":"Usage","text":"
    leverage terraform import ADDRESS ID\n

    Equivalent to terraform import.

    Import the resource with the given ID into the Terraform state at the given ADDRESS.

    Can only be run at layer level.

    zsh globbing

    Zsh users may need to prepend noglob to the import command for it to be recognized correctly. As an alternative, square brackets can be escaped as \\[\\].

    Examples:
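
    The resource addresses and IDs below are purely illustrative:

    # Import a resource into the current layer's state\nleverage terraform import aws_instance.example i-0123456789abcdef0\n\n# Indexed addresses under zsh: either escape the brackets or prepend noglob\nleverage terraform import aws_iam_user.users\\[0\\] my-user\nnoglob leverage terraform import aws_iam_user.users[0] my-user\n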

    "},{"location":"user-guide/leverage-cli/reference/tfautomv/","title":"Command: tfautomv","text":"

    Regarding Leverage Toolbox versions

    To use this feature, Leverage Toolbox versions 1.2.7-0.0.5 and up, or 1.3.5-0.0.1 and up, must be used.

    The tfautomv command is a wrapper for a containerized installation of tfautomv. It provides the tfautomv executable with specific configuration values required by Leverage.

    It transparently handles authentication, whether it is Multi-Factor or via Single Sign-On, on behalf of the user in the commands that require it. SSO Authentication takes precedence over MFA when both are active.

    This command can only be run at layer level and will not run anywhere else in the project.

    "},{"location":"user-guide/leverage-cli/reference/tfautomv/#run","title":"run","text":""},{"location":"user-guide/leverage-cli/reference/tfautomv/#usage","title":"Usage","text":"
    leverage tfautomv run [arguments]\n

    Equivalent to tfautomv.

    All arguments given are passed as received to tfautomv.

    Example:

    leverage tfautomv run --show-analysis --dry-run\n
    "},{"location":"user-guide/leverage-cli/reference/terraform/layers/","title":"Layers parameter","text":"

    This parameter can be used with the following Leverage CLI Terraform commands:

    Value:

    Parameter Type Description --layers string A comma separated list of layers' relative paths"},{"location":"user-guide/leverage-cli/reference/terraform/layers/#common-workflow","title":"Common workflow","text":"

    When using the --layers parameter, these commands should be run from account or layers-container-directory directories.

    Example:

    For this structure:

     home\n \u251c\u2500\u2500 user\n \u2502\u00a0\u00a0 \u2514\u2500\u2500 project\n \u2502\u00a0\u00a0     \u2514\u2500\u2500 management\n \u2502\u00a0\u00a0         \u251c\u2500\u2500 global\n \u2502\u00a0\u00a0         |   \u2514\u2500\u2500 security-base\n \u2502\u00a0\u00a0         |   \u2514\u2500\u2500 sso\n \u2502\u00a0\u00a0         \u2514\u2500\u2500 us-east-1\n \u2502\u00a0\u00a0             \u2514\u2500\u2500 terraform-backend\n

    ...any of the aforementioned commands, combined with --layers, can be called from /home/user/project/management/, /home/user/project/management/global/ or /home/user/project/management/us-east-1/.

    The value for this parameter is a comma separated list of layers' relative paths.

    The Leverage CLI will iterate through the layers' relative paths, going into each one, executing the command, and going back to the original directory.

    Example:

    For this command, from /home/user/project/management/:

    leverage tf plan --layers us-east-1/terraform-backend,global/security-base\n

    ...the Leverage CLI will:

    "},{"location":"user-guide/leverage-cli/reference/terraform/layers/#the-init-case","title":"The init case","text":"

    When running init, the Leverage CLI runs a validation.

    When using the --layers option, the validation is run for every layer before the command itself is run.

    Example:

    For this command, from /home/user/project/management/:

    leverage tf init --layers us-east-1/terraform-backend,global/security-base\n

    ...the Leverage CLI will:

    This is done to prevent truncated executions. Meaning, if any of the validations fails, the user can fix whatever has to be fixed and run the command again as it is.

    Skipping the validation

    The --skip-validation flag can still be used here with --layers.
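
    Example, from /home/user/project/management/:

    leverage tf init --skip-validation --layers us-east-1/terraform-backend,global/security-base\n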

    "},{"location":"user-guide/leverage-cli/reference/terraform/layers/#terraform-parameters-and-flags","title":"Terraform parameters and flags","text":"

    Terraform parameters and flags can still be passed when using the --layers parameter.

    Example:

    leverage tf apply --layers us-east-1/terraform-backend,global/security-base -auto-approve\n
    "},{"location":"user-guide/ref-architecture-ansible/overview/","title":"Ansible Reference Architecture","text":""},{"location":"user-guide/ref-architecture-ansible/overview/#overview","title":"Overview","text":"

    This repository contains all the Ansible Playbook configuration files used to create the binbash Leverage\u2122 Reference Architecture for AWS.

    "},{"location":"user-guide/ref-architecture-ansible/overview/#ansible-playbook-documentation","title":"Ansible Playbook Documentation","text":"

    Check out the README.md contained under each repo.

    Playbooks Documentation

    User Management & Security

    VPN Server

    Monitoring & Alerting

    Centralized Logs

    CI/CD

    Secret Mgmt

    "},{"location":"user-guide/ref-architecture-ansible/workflow/","title":"Workflow","text":"

    Leverage CLI

    Ansible Infra

    1. Get into the folder that you need to work with (e.g. ansible-playbook-vpn-pritunl)
    2. Run leverage run init to get all the necessary Ansible roles based on each requirements.yml
    3. Make whatever changes you need to make as stated in each Playbook Documentation (check Documentation section above)
    4. For a dry run execution use leverage run apply\\[--check\\] if you only mean to preview those changes
    5. Run leverage run apply if you want to apply those changes
    6. If you want to target specific playbook tasks by tag (eg: common tag) you can run one of the following options:
      • Opt-1: leverage run apply[\"--tags\",\"common\"]
      • Opt-2: noglob leverage run apply[\"--tags\",\"common\"]
      • Opt-3: leverage shell and then ansible-playbook setup.yml --tags common
    "},{"location":"user-guide/ref-architecture-aws/configuration/","title":"Configuration","text":""},{"location":"user-guide/ref-architecture-aws/configuration/#configuration-files","title":"Configuration Files","text":"

    Config files can be found under each config folder.

    "},{"location":"user-guide/ref-architecture-aws/configuration/#setting-credentials-for-terraform-via-aws-profiles","title":"Setting credentials for Terraform via AWS profiles","text":""},{"location":"user-guide/ref-architecture-aws/credentials/","title":"Credentials","text":""},{"location":"user-guide/ref-architecture-aws/credentials/#overview","title":"Overview","text":"

    Currently the following two methods are supported:

    1. AWS IAM: this is essentially using on-disk, permanent programmatic credentials that are tied to a given IAM User. This method can optionally support MFA which is highly recommended since using permanent credentials is discouraged, so at least with MFA you can counter-balance that. Keep reading...
    2. AWS IAM Identity Center (formerly known as AWS SSO): this one is more recent and it's the method recommended by AWS, since it uses roles (managed by AWS) which in turn enforce the usage of temporary credentials. Keep reading...
    "},{"location":"user-guide/ref-architecture-aws/credentials/#next-steps","title":"Next Steps","text":"

    If you are planning to choose SSO (highly recommended), check out this section.

    If you are instead interested in using IAM + MFA, refer to this other section instead.

    "},{"location":"user-guide/ref-architecture-aws/dir-structure/","title":"Project Structure","text":""},{"location":"user-guide/ref-architecture-aws/dir-structure/#filesfolders-organization","title":"Files/Folders Organization","text":"

    The following block provides a brief explanation of the chosen files/folders layout. Under every account folder (management, shared, security, etc.) you will see a service layer structure similar to the following:

    MyExample project file structure
        ...\n    \u251c\u2500\u2500 \ud83d\udcc2 apps-devstg\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 config\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tfvars\n    |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 backend.tfvars\n    |   \u251c\u2500\u2500 \ud83d\udcc2 global\n    |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 base-identities\n    |   \u251c\u2500\u2500 \ud83d\udcc2 us-east-1\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 backups\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-certificates\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-network\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-tf-backend\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 cdn-s3-frontend\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 databases-aurora\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 databases-mysql\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 databases-pgsql\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 k8s-eks-demoapps\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 notifications\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-audit\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-base\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-certs\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-firewall\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 storage\n    |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 tools-cloud-nuke\n    |   \u2514\u2500\u2500 \ud83d\udcc2 us-east-2\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 k8s-eks\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-compliance\n    |    \u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 security-keys\n    \u251c\u2500\u2500 \ud83d\udcc2 apps-prd\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 config\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tfvars\n    |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 backend.tfvars\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 global\n    |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 base-identities\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 us-east-1\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 backups\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-network\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-tf-backend\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 cdn-s3-frontend\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 k8s-eks\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 notifications\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-audit\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-base\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-certs\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-compliance\n    |    \u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 security-keys\n    \u251c\u2500\u2500 \ud83d\udcc4 build.env\n    \u251c\u2500\u2500 \ud83d\udcc4 build.py\n    \u251c\u2500\u2500 \ud83d\udcc2 config\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 common.tfvars\n    \u251c\u2500\u2500 \ud83d\udcc2 management\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 config\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tfvars\n    |   \u2502\u00a0\u00a0 
\u2514\u2500\u2500 \ud83d\udcc4 backend.tfvars\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 global\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-identities\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 cost-mgmt\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 organizations\n    |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 sso\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 us-east-1\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 backups\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-tf-backend\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 notifications\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-audit\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-base\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-compliance\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-keys\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 us-east-2\n    |    \u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 security-monitoring\n    \u251c\u2500\u2500 \ud83d\udcc2 network\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 config\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tfvars\n    |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 backend.tfvars\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 global\n    |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 base-identities\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 us-east-1\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-network\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-tf-backend\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 network-firewall\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 notifications\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-audit\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-base\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-compliance\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-keys\n    |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 transit-gateway\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 us-east-2\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-network\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 network-firewall\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-compliance\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-keys\n    |    \u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 transit-gateway\n    \u251c\u2500\u2500 \ud83d\udcc2 security\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 config\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tfvars\n    |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 backend.tfvars\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 global\n    |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 base-identities\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 us-east-1\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 base-tf-backend\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 firewall-manager\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 notifications\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-audit\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-base\n    |   \u2502\u00a0\u00a0 
\u251c\u2500\u2500 \ud83d\udcc2 security-compliance\n    |   \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-keys\n    |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 security-monitoring\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 us-east-2\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-audit\n    |    \u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc2 security-compliance\n    |    \u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 security-monitoring\n    \u2514\u2500\u2500 \ud83d\udcc2 shared\n    \u251c\u2500\u2500 \ud83d\udcc2 config\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 \ud83d\udcc4 account.tfvars\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc4 backend.tfvars\n    \u251c\u2500\u2500 \ud83d\udcc2 global\n    |   \u251c\u2500\u2500 \ud83d\udcc2 base-dns\n    |   \u2514\u2500\u2500 \ud83d\udcc2 base-identities\n    \u251c\u2500\u2500 \ud83d\udcc2 us-east-1\n    |   \u251c\u2500\u2500 \ud83d\udcc2 backups\n    |   \u251c\u2500\u2500 \ud83d\udcc2 base-network\n    |   \u251c\u2500\u2500 \ud83d\udcc2 base-tf-backend\n    |   \u251c\u2500\u2500 \ud83d\udcc2 container-registry\n    |   \u251c\u2500\u2500 \ud83d\udcc2 ec2-fleet\n    |   \u251c\u2500\u2500 \ud83d\udcc2 k8s-eks\n    |   \u251c\u2500\u2500 \ud83d\udcc2 k8s-eks-demoapps\n    |   \u251c\u2500\u2500 \ud83d\udcc2 k8s-eks-prd\n    |   \u251c\u2500\u2500 \ud83d\udcc2 notifications\n    |   \u251c\u2500\u2500 \ud83d\udcc2 security-audit\n    |   \u251c\u2500\u2500 \ud83d\udcc2 security-base\n    |   \u251c\u2500\u2500 \ud83d\udcc2 security-compliance\n    |   \u251c\u2500\u2500 \ud83d\udcc2 storage\n    |   \u251c\u2500\u2500 \ud83d\udcc2 tools-cloud-scheduler-stop-start\n    |   \u251c\u2500\u2500 \ud83d\udcc2 tools-eskibana\n    |   \u251c\u2500\u2500 \ud83d\udcc2 tools-github-selfhosted-runners\n    |   \u251c\u2500\u2500 \ud83d\udcc2 tools-jenkins\n    |   \u251c\u2500\u2500 \ud83d\udcc2 tools-managedeskibana\n    |   \u251c\u2500\u2500 \ud83d\udcc2 tools-prometheus\n    |   \u251c\u2500\u2500 \ud83d\udcc2 tools-vault\n    |   \u251c\u2500\u2500 \ud83d\udcc2 tools-vpn-server\n    |   \u2514\u2500\u2500 \ud83d\udcc2 tools-webhooks\n    \u00a0\u00a0 \u2514\u2500\u2500 \ud83d\udcc2 us-east-2\n    \u251c\u2500\u2500 \ud83d\udcc2 base-network\n    \u251c\u2500\u2500 \ud83d\udcc2 container-registry\n    \u251c\u2500\u2500 \ud83d\udcc2 security-compliance\n    \u251c\u2500\u2500 \ud83d\udcc2 security-keys\n    \u251c\u2500\u2500 \ud83d\udcc2 tools-eskibana\n    \u2514\u2500\u2500 \ud83d\udcc2 tools-prometheus\n

    Configuration files are organized by environments (e.g. dev, stg, prd), and service type, which we call layers (identities, organizations, storage, etc) to keep any changes made to them separate. Within each of those layers folders you should find the Terraform files that are used to define all the resources that belong to such account environment and specific layer.

    Project file structure

    An extended project file structure can be found here, while some other basic concepts and naming conventions in the context of Leverage, like \"project\" and \"layer\", are explained here.

    Figure: AWS Organization multi-account architecture diagram. (Source: binbash Leverage, \"Leverage Reference Architecture components\", binbash Leverage Doc, accessed August 4th 2021).

    NOTE: As a convention, folders with the -- suffix indicate that the resources are not currently created in AWS; they have either been destroyed or do not yet exist.

    Such layer separation is meant to avoid situations in which a single folder contains a lot of resources. That is important to avoid because at some point, running leverage terraform plan / apply starts taking too long and that becomes a problem.

    This organization also provides a layout that is easier to navigate and discover. You simply start with the accounts at the top level and then you get to explore the resource categories within each account.

    "},{"location":"user-guide/ref-architecture-aws/overview/","title":"AWS Reference Architecture","text":""},{"location":"user-guide/ref-architecture-aws/overview/#overview","title":"Overview","text":"

    The AWS Reference Architecture was built on a set of opinionated definitions and conventions regarding:

    Key Concept

    Although the Reference Architecture for AWS was initially designed to be compatible with web, mobile and microservices application stacks, it can also accommodate other types of workloads such as machine learning, blockchain, media, and more.

    It was designed with modularity in mind. A multi-account approach is leveraged in order to improve security isolation and resource separation. Furthermore, each account's infrastructure is divided into smaller units that we call layers. Each layer contains all the required resources and definitions for a specific service or feature to function.

    Key Concept

    The design is strongly based on the AWS Well Architected Framework.

    Each individual configuration of the Reference Architecture is referred to as a project. A Leverage project comprises all the relevant accounts and layers.

    "},{"location":"user-guide/ref-architecture-aws/overview/#core-strengths","title":"Core Strengths","text":""},{"location":"user-guide/ref-architecture-aws/overview/#a-more-visual-example","title":"A More Visual Example","text":"

    The following diagram shows the type of AWS multi-account setup you can achieve by using this Reference Architecture:

    Figure: AWS Organization multi-account reference architecture diagram. (Source: binbash Leverage, \"Leverage Reference Architecture components\", binbash Leverage Doc, accessed August 4th 2021).

    Read more

    "},{"location":"user-guide/ref-architecture-aws/references/","title":"References","text":"

    The following are official AWS documentation pages, blog posts and whitepapers we have considered while building our Reference Solutions Architecture:

    "},{"location":"user-guide/ref-architecture-aws/tf-state/","title":"Terraform - S3 & DynamoDB for Remote State Storage & Locking","text":""},{"location":"user-guide/ref-architecture-aws/tf-state/#overview","title":"Overview","text":"

    Use these Terraform configuration files to create the S3 bucket & DynamoDB table needed to use Terraform Remote State Storage & Locking.

    What is the Terraform Remote State?

    Read the official definition by Hashicorp.

    Figure: Terraform remote state store & locking necessary AWS S3 bucket and DynamoDB table components. (Source: binbash Leverage, \"Terraform Module: Terraform Backend\", Terraform modules registry, accessed December 3rd 2020)."},{"location":"user-guide/ref-architecture-aws/tf-state/#prerequisites","title":"Prerequisites","text":"

    Terraform repo structure + state backend initialization

    1. Ensure you have Leverage CLI installed in your system
    2. Refer to Configuration Pre-requisites to understand how to set up the configuration files required for this layer, where you must build your Terraform Reference Architecture account structure
    3. Leveraged by the Infrastructure as Code (IaC) Library through the terraform-aws-tfstate-backend module
      • /management/base-tf-backend
      • /security/base-tf-backend
      • /shared/base-tf-backend
      • /network/base-tf-backend
      • /apps-devstg/base-tf-backend
      • /apps-prd/base-tf-backend
    "},{"location":"user-guide/ref-architecture-aws/tf-state/#set-up","title":"Set up","text":"

    Steps to initialize your tf-backend

    1. Go to the corresponding account directory, e.g. /shared/base-tf-backend, then:
    2. Run leverage terraform init --skip-validation
    3. Run leverage terraform plan, review the output to understand the expected changes
    4. Run leverage terraform apply, review the output once more and type yes if you are okay with that
    5. This should create a terraform.tfstate file in this directory, but we don't want to push that to the repository, so let's push the state to the backend we just created

      • Open config.tf and uncomment the following lines:
          # backend \"s3\" {\n  #   key = \"shared/tf-backend/terraform.tfstate\"\n  # }\n
      • Run leverage terraform init and type yes when Terraform asks if you want to import the state to the S3 backend
      • Done. You can remove terraform.tfstate now (and also terraform.tfstate.backup if available)
    "},{"location":"user-guide/ref-architecture-aws/tf-state/#expected-workflow-after-set-up","title":"Expected workflow after set up","text":"

    This video is outdated!

    "},{"location":"user-guide/ref-architecture-aws/tf-state/#terraform-remote-state","title":"Terraform Remote State","text":"

    In the base-tf-backend folder you should find the definition of the infrastructure that needs to be deployed before you can get to work with anything else.

    IMPORTANT: THIS IS ONLY NEEDED IF THE BACKEND WAS NOT CREATED YET. IF THE BACKEND ALREADY EXISTS YOU JUST USE IT.

    "},{"location":"user-guide/ref-architecture-aws/workflow/","title":"Workflow","text":""},{"location":"user-guide/ref-architecture-aws/workflow/#overview","title":"Overview","text":"

    The sequence of commands that you run to operate on each layer is called the Terraform workflow. In other words, it's what you would typically run in order to create, update, or delete the resources defined in a given layer.

    "},{"location":"user-guide/ref-architecture-aws/workflow/#the-basic-workflow","title":"The basic workflow","text":"

    Assuming that you have everything configured, the frequent commands you'll need to run are these:

    # 1. Initialize\nleverage terraform init\n\n# 2. Preview any changes\nleverage terraform plan\n\n# 3. Apply any changes\nleverage terraform apply\n

    "},{"location":"user-guide/ref-architecture-aws/workflow/#the-extended-workflow","title":"The extended workflow","text":"

    Now, the extended workflow is annotated with more explanations and it is intended for users who haven't yet worked with Leverage on a daily basis:

    Terraform Workflow

    1. Make sure you understood the basic concepts:
      • Overview
      • Configuration
      • Directory Structure
      • Remote State
    2. Make sure you installed the Leverage CLI.
    3. Go to the layer (directory) you need to work with, e.g. shared/global/base-identities/.
    4. Run leverage terraform init -- only the first time you work on this layer, or if you upgraded modules or providers versions, or if you made changes to the Terraform remote backend configuration.
    5. Make any changes you need to make. For instance: modify a resource definition, add an output, add a new resource, etc.
    6. Run leverage terraform plan to preview any changes.
    7. Run leverage terraform apply to give it a final review and to apply any changes.

    Tip

    You can use the --layers argument to run Terraform commands on more than one layer. For more information see here

    Note

    If desired, at step #5 you could submit a PR, allowing you and the rest of the team to understand and review what changes would be made to your AWS Cloud Architecture components before executing leverage terraform apply (terraform apply). This brings the huge benefit of treating changes with a GitOps oriented approach, basically as we should treat any other code & infrastructure change, and integrating it with the rest of our tools and practices like CI/CD, integration testing, and so on.

    "},{"location":"user-guide/ref-architecture-aws/workflow/#running-in-automation","title":"Running in Automation","text":"Figure: Running terraform with AWS in automation (just as reference).

    Read More

    "},{"location":"user-guide/ref-architecture-aws/features/","title":"Index","text":""},{"location":"user-guide/ref-architecture-aws/features/#features","title":"Features","text":""},{"location":"user-guide/ref-architecture-aws/features/#overview","title":"Overview","text":"

    This reference architecture supports a growing number of AWS services. This section lists all of them and goes through each in depth.

    Governance | AWS Organizations

    Identity Management

    Single Sign-On (SSO)

    Cost Monitoring & Optimization

    Security

    Networking | VPC, TGW, NFW, DNS and NACLs

    Secrets Management

    Compute

    Databases

    Storage

    Content Delivery Network (CDN)

    CI/CD (Continuous Integration / Continuous Delivery)

    Monitoring | Metrics, Logs, APM and Tracing

    Reliability

    "},{"location":"user-guide/ref-architecture-aws/features/cdn/cdn/","title":"CDN","text":"

    AWS Cloud Front

    Amazon CloudFront is a fast content delivery network (CDN) service that securely delivers data, videos, applications, and APIs to customers globally with low latency, high transfer speeds, all within a developer-friendly environment. CloudFront is integrated with AWS \u2013 both physical locations that are directly connected to the AWS global infrastructure, as well as other AWS services. CloudFront works seamlessly with services including AWS Shield for DDoS mitigation, Amazon S3, Elastic Load Balancing, API Gateway or Amazon EC2 as origins for your applications, and Lambda@Edge to run custom code closer to customers\u2019 users and to customize the user experience. Lastly, if you use AWS origins such as Amazon S3, Amazon EC2 or Elastic Load Balancing, you don\u2019t pay for any data transferred between these services and CloudFront.

    "},{"location":"user-guide/ref-architecture-aws/features/cdn/cdn/#load-balancer-alb-nlb-s3-cloudfront-origins","title":"Load Balancer (ALB | NLB) & S3 Cloudfront Origins","text":"Figure: AWS CloudFront with ELB and S3 as origin diagram. (Source: Lee Atkinson, \"How to Help Achieve Mobile App Transport Security (ATS) Compliance by Using Amazon CloudFront and AWS Certificate Manager\", AWS Security Blog, accessed November 17th 2020)."},{"location":"user-guide/ref-architecture-aws/features/cdn/cdn/#api-gateway-cloudfront-origins","title":"API Gateway Cloudfront Origins","text":"Figure: AWS CloudFront with API Gateway as origin diagram. (Source: AWS, \"AWS Solutions Library, AWS Solutions Implementations Serverless Image Handler\", AWS Solutions Library Solutions Implementations, accessed November 17th 2020)."},{"location":"user-guide/ref-architecture-aws/features/ci-cd/argocd/","title":"ArgoCD","text":""},{"location":"user-guide/ref-architecture-aws/features/ci-cd/argocd/#argocd","title":"ArgoCD","text":""},{"location":"user-guide/ref-architecture-aws/features/ci-cd/argocd/#aws-apps-services-k8s-eks-accounts-diagram","title":"AWS Apps & Services K8s EKS accounts diagram","text":"

    The diagram below is based on our binbash Leverage Reference Architecture CI-CD official documentation.

    Figure: K8S reference architecture CI/CD with ArgoCD diagram. (Source: binbash Leverage Confluence Doc, \"Implementation Diagrams\", binbash Leverage Doc, accessed August 4th 2021)."},{"location":"user-guide/ref-architecture-aws/features/ci-cd/jenkins-argocd/","title":"CI/CD","text":""},{"location":"user-guide/ref-architecture-aws/features/ci-cd/jenkins-argocd/#jenkins-argocd","title":"Jenkins + ArgoCD","text":"Figure: ACI/CD with Jenkins + ArgoCD architecture diagram. (Source: ArgoCD, \"Overview - What Is Argo CD\", ArgoCD documentation, accessed November 18th 2020)."},{"location":"user-guide/ref-architecture-aws/features/ci-cd/jenkins-spinnaker/","title":"CI/CD","text":""},{"location":"user-guide/ref-architecture-aws/features/ci-cd/jenkins-spinnaker/#jenkins-spinnaker","title":"Jenkins + Spinnaker","text":"Figure: CI/CD with Jenkins + Spinnaker diagram. (Source: Irshad Buchh, \"Continuous Delivery using Spinnaker on Amazon EKS\", AWS Open Source Blog, accessed November 18th 2020)."},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-eks/","title":"AWS Elastic Kubernetes Service (EKS)","text":"

    Important

    Please check the Reference Architecture for EKS to learn more details about this.

    "},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-kops/","title":"AWS Kubernetes Kops Cluster","text":""},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-kops/#overview","title":"Overview","text":"

    Kops is an official Kubernetes project for managing production-grade Kubernetes clusters. Kops is currently the best tool to deploy Kubernetes clusters to Amazon Web Services. The project describes itself as kubectl for clusters.

    Core Features

    Figure: AWS K8s Kops architecture diagram (just as reference). (Source: Carlos Rodriguez, \"How to deploy a Kubernetes cluster on AWS with Terraform & kops\", Nclouds.com Blog post, accessed November 18th 2020)."},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-kops/#kops-pre-requisites","title":"Kops Pre-requisites","text":"

    Important consideration

    K8s clusters provisioned by Kops have a number of resources that need to be available before the cluster is created. These are Kops pre-requisites and they are defined in the 1-prerequisites directory which includes all Terraform files used to create/modify these resources.

    The current code has been fully tested with the AWS VPC Network Module

    OS pre-req packages

    Ref Link: https://github.com/kubernetes/kops/blob/master/docs/install.md

    "},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-kops/#resulting-solutions-architecture","title":"Resulting Solutions Architecture","text":"Figure: AWS K8s Kops architecture diagram (just as reference)."},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-kops/#why-this-workflow","title":"Why this workflow","text":"

    The workflow follows the same approach that is used to manage other Terraform resources in your AWS accounts, e.g. network, identities, and so on.

    So we'll use existing AWS resources to create a cluster-template.yaml containing all the resource IDs that Kops needs to create a Kubernetes cluster.

    Why not directly use Kops CLI to create the K8s cluster as well as the VPC and its other dependencies?

    1. While this is a valid approach, we want to manage all these building blocks independently and be able to fully customize any AWS component without having to alter our Kubernetes cluster definitions and vice-versa.

    2. This is a fully declarative coding-style approach to managing your infrastructure, so being able to declare the state of our cluster in YAML files fits 100% with the infrastructure-as-code & GitOps based approach.

    Figure: [Workflow diagram](https://medium.com/bench-engineering/deploying-kubernetes-clusters-with-kops-and-terraform-832b89250e8e)."},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-kops/#kops-cluster-management","title":"Kops Cluster Management","text":"

    The 2-kops directory includes helper scripts and Terraform files in order to template our Kubernetes cluster definition. The idea is to use our Terraform outputs from 1-prerequisites to construct a cluster definition.

    "},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-kops/#overview_1","title":"Overview","text":"

    Cluster Management via Kops is typically carried out through the kops CLI. In this case, we use a 2-kops directory that contains a Makefile, Terraform files and other helper scripts that reinforce the workflow we use to create/update/delete the cluster.

    "},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-kops/#workflow","title":"Workflow","text":"

    This workflow is a little different from the typical Terraform workflows we use. The full workflow goes as follows:

    Cluster: Creation & Update

    1. Modify files under 1-prerequisites
      • Main files to update are probably locals.tf and outputs.tf
      • Mostly before the cluster is created, but could be needed afterward
    2. Modify cluster-template.yml under the 2-kops folder
      • E.g. to add or remove instance groups, upgrade the k8s version, etc.
    3. At the 2-kops/ context, run make cluster-update, which will follow the steps below:
      • Get Terraform outputs from 1-prerequisites
      • Generate a Kops cluster manifest -- it uses cluster-template.yml as a template and the outputs from the point above as replacement values
      • Update the Kops state -- it uses the Kops cluster manifest generated in the previous point (cluster.yml)
      • Generate the Kops Terraform file (kubernetes.tf) -- this file represents the changes that Kops needs to apply on the cloud provider
    4. Run make plan
      • To preview any infrastructure changes that Terraform will make.
      • If desired we could submit a PR, allowing you and the rest of the team to understand and review what changes would be made to the Kubernetes cluster before executing make apply (terraform apply). This brings the huge benefit of treating changes to our Kubernetes clusters with a GitOps oriented approach, basically like we treat any other code & infrastructure change, and integrating it with the rest of our tools and practices like CI/CD, integration testing, replicated environments and so on.
    5. Run make apply
      • To apply those infrastructure changes on AWS.
    6. Run make cluster-rolling-update
      • To determine if Kops needs to trigger some changes to happen right now (dry run)
      • These are usually changes to the EC2 instances that won't get reflected, as they depend on the autoscaling
    7. Run make cluster-rolling-update-yes
      • To actually make any changes to the cluster masters/nodes happen

    Cluster: Deletion

    To clean-up any resources created for your K8s cluster, you should run:

    1. At the 2-kops folder context, run make destroy
      • This will execute a terraform destroy of all the AWS resources declared in kubernetes.tf.
    2. At the 2-kops folder context, run cluster-destroy
      • This will run kops destroy cluster -- only a dry run, no changes will be applied.
    3. Exec cluster-destroy-yes
      • Kops will effectively destroy all the remaining cluster resources.
    4. Finally, at 1-prerequisites, exec make destroy
      • This will remove the Kops state S3 bucket + any other extra resources you've provisioned for your cluster.
    "},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-kops/#typical-workflow","title":"Typical Workflow","text":"

    The workflow may look complicated at first, but generally it boils down to these simplified steps:
    1. Modify cluster-template.yml
    2. Run make cluster-update
    3. Run make apply
    4. Run make cluster-rolling-update-yes
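
    As a runnable sketch (run from the cluster's 2-kops directory, using your editor of choice):

    # 1. Edit the cluster definition template\nvi cluster-template.yml\n\n# 2. Regenerate the Kops manifest and kubernetes.tf from the Terraform outputs\nmake cluster-update\n\n# 3. Apply the infrastructure changes on AWS\nmake apply\n\n# 4. Roll the changes out to the cluster masters/nodes\nmake cluster-rolling-update-yes\n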

    What about persistent and stateful K8s resources?

    This approach works better the more stateless your Kubernetes workloads are. Treating Kubernetes clusters as ephemeral and replaceable infrastructure requires either avoiding persistent volumes or accepting the difficulties of running workloads such as databases on K8s. We feel pretty confident that we can recreate our workloads by applying each of our service definitions, charts and manifests to a given Kubernetes cluster, as long as we keep the persistent storage separately on AWS RDS, DynamoDB, EFS and so on. In terms of etcd state persistency, Kops already provisions the etcd volumes (AWS EBS) independently of the master instances they get attached to. This helps to persist the etcd state after rolling updates of your master nodes without any user intervention, and also simplifies volume backups via EBS Snapshots (consider https://github.com/binbashar/terraform-aws-backup-by-tags). We also use a very valuable backup tool named Velero (formerly Heptio Ark - https://github.com/vmware-tanzu/velero) to back up and restore our Kubernetes cluster resources and persistent volumes.

    TODO

    1. IMPORTANT: Kops Terraform output (kops update cluster --target terraform) is still generated for Terraform 0.11.x (https://github.com/kubernetes/kops/issues/7052); we'll take care of the migration when tf-0.12 gets fully supported.
    2. Create a binbash Leverage public Confluence Wiki entry detailing some more info about etcd, calico and k8s versions compatibilities
    "},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-service-mesh/","title":"Service Mesh","text":""},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-service-mesh/#overview","title":"Overview","text":"

    Ultra light, ultra simple, ultra powerful. Linkerd adds security, observability, and reliability to Kubernetes, without the complexity. CNCF-hosted and 100% open source.

    "},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-service-mesh/#how-it-works","title":"How it works","text":"

    How Linkerd works

    Linkerd works by installing a set of ultralight, transparent proxies next to each service instance. These proxies automatically handle all traffic to and from the service. Because they\u2019re transparent, these proxies act as highly instrumented out-of-process network stacks, sending telemetry to, and receiving control signals from, the control plane. This design allows Linkerd to measure and manipulate traffic to and from your service without introducing excessive latency.

    "},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-service-mesh/#architecture","title":"Architecture","text":"Figure: Figure: Linkerd v2.10 architecture diagram. (Source: Linkerd official documentation, \"High level Linkerd control plane and a data plane.\", Linkerd Doc, accessed June 14th 2021)."},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-service-mesh/#dashboard","title":"Dashboard","text":"Figure: Figure: Linkerd v2.10 dashboard. (Source: Linkerd official documentation, \"Linkerd dashboard\", Linkerd Doc, accessed June 14th 2021)."},{"location":"user-guide/ref-architecture-aws/features/compute/k8s-service-mesh/#read-more","title":"Read more","text":"

    Related resources

    "},{"location":"user-guide/ref-architecture-aws/features/compute/overview/","title":"Compute","text":""},{"location":"user-guide/ref-architecture-aws/features/compute/overview/#containers-and-serverless","title":"Containers and Serverless","text":"

    Overview

    In order to serve Client application workloads we propose to implement Kubernetes, and proceed to containerize all application stacks whenever it\u2019s the best solution (we\u2019ll also consider AWS Lambda for a Serverless approach when it fits better). Kubernetes is an open source container orchestration platform that eases the process of running containers across many different machines, scaling up or down by adding or removing containers when demand changes and provides high availability features. Also, it serves as an abstraction layer that will give Client the possibility, with minimal effort, to move the apps to other Kubernetes clusters running elsewhere, or a managed Kubernetes service such as AWS EKS, GCP GKE or others.

    Clusters will be provisioned with Kops and/or AWS EKS, which are solutions meant to orchestrate this compute engine in AWS. Whenever possible the initial version deployed will be the latest stable release.

    Figure: Kubernetes high level components architecture. (Source: Andrew Martin, \"11 Ways (Not) to Get Hacked\", Kubernetes.io Blog post, accessed November 18th 2020)."},{"location":"user-guide/ref-architecture-aws/features/compute/overview/#kubernetes-addons","title":"Kubernetes addons","text":"

    Some possible K8s addons could be

    Security

    Networking

    Monitoring & Logs

    Distributed Tracing

    UI Dashboard

    Availability & Reliability

    Utilities

    "},{"location":"user-guide/ref-architecture-aws/features/compute/serverless/","title":"Serverless Compute","text":"

    As stated by AWS Serverless definitions

    What is serverless?

    Serverless is the native architecture of the cloud that enables you to shift more of your operational responsibilities to AWS, increasing your agility and innovation. Serverless allows you to build and run applications and services without thinking about servers. It eliminates infrastructure management tasks such as server or cluster provisioning, patching, operating system maintenance, and capacity provisioning. You can build them for nearly any type of application or backend service, and everything required to run and scale your application with high availability is handled for you.

    Why use serverless?

    Serverless enables you to build modern applications with increased agility and lower total cost of ownership. Building serverless applications means that your developers can focus on their core product instead of worrying about managing and operating servers or runtimes, either in the cloud or on-premises. This reduced overhead lets developers reclaim time and energy that can be spent on developing great products which scale and that are reliable.

    Figure: AWS serverless architecture diagram (just as reference). (Source: Nathan Peck, \"Designing a modern serverless application with AWS Lambda and AWS Fargate\", Containers-on-AWS Medium Blog post, accessed November 18th 2020).

    Serverless Compute Services

    "},{"location":"user-guide/ref-architecture-aws/features/compute/tools/","title":"Infrastructure Instances Tools","text":""},{"location":"user-guide/ref-architecture-aws/features/compute/tools/#overview","title":"Overview","text":"

    Apart from the EC2 instances that are part of Kubernetes, there are going to be other instances running tools for monitoring, logging centralization, builds/tests, deployment, among others, that are to be defined at this point. Some of them can be replaced by managed services, like CircleCI, Snyk, etc., and this has pros and cons that will need to be considered at the time of implementation. Any OS that is provisioned will be completely reproducible as code, in the event of a migration to another vendor.

    Other settings for all EC2 instances

    Infrastructure EC2 instances

    "},{"location":"user-guide/ref-architecture-aws/features/costs/costs/","title":"Cost Estimation & Optimization","text":""},{"location":"user-guide/ref-architecture-aws/features/costs/costs/#opportunity-to-optimize-resources","title":"Opportunity to optimize resources","text":"

    Compute

    Databases

    Monitoring & Automation

    Storage & Network Traffic

    "},{"location":"user-guide/ref-architecture-aws/features/costs/costs/#consideration","title":"Consideration","text":"

    Reserved Instances

    "},{"location":"user-guide/ref-architecture-aws/features/costs/costs/#read-more","title":"Read more","text":"

    Reference links

    Consider the following extra links as reference:

    "},{"location":"user-guide/ref-architecture-aws/features/database/database/","title":"Databases","text":""},{"location":"user-guide/ref-architecture-aws/features/database/database/#overview","title":"Overview","text":"

    We will implement AWS RDS databases matching the requirements of the current application stacks. If the selected region is the same one you're currently using for your legacy AWS RDS instances, we will be able to create a peering connection to the existing databases in order to migrate the application stacks first, and the databases afterwards.
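    As a reference only (in practice this would be expressed as code in the corresponding Terraform layer), the peering connection could be sketched with the AWS CLI as follows; all VPC IDs, account IDs and profile names below are placeholders:

    # Request a peering connection from the new VPC to the legacy VPC (placeholder IDs)
    aws ec2 create-vpc-peering-connection \
        --vpc-id vpc-0a11111111111111a \
        --peer-vpc-id vpc-0b22222222222222b \
        --peer-owner-id 111111111111

    # Accept it from the account that owns the legacy VPC (placeholder profile and ID)
    aws ec2 accept-vpc-peering-connection \
        --vpc-peering-connection-id pcx-0c33333333333333c \
        --profile legacy-account

    # Remember to add routes in both VPCs' route tables and open the DB ports in the security groups.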

    AWS RDS Specs

    "},{"location":"user-guide/ref-architecture-aws/features/database/mysql/","title":"RDS | MySQL","text":"

    TODO Add this

    "},{"location":"user-guide/ref-architecture-aws/features/database/postgres/","title":"RDS | PostgresSQL","text":"

    TODO Add this

    "},{"location":"user-guide/ref-architecture-aws/features/identities/credentials-vault/","title":"Hashicorp Vault credentials","text":""},{"location":"user-guide/ref-architecture-aws/features/identities/credentials-vault/#hashicorp-vault-private-api-endpoint","title":"Hashicorp Vault private API endpoint","text":"

    If you are on HCP, you can get this from the Admin UI. Otherwise, it will depend on how you set up DNS, TLS and port settings for your self-hosted installation. We always favour a private endpoint deployment that is only accessible from the VPN.

    "},{"location":"user-guide/ref-architecture-aws/features/identities/credentials-vault/#aws-self-hosted-vault-instance-endpoint","title":"AWS Self Hosted Vault Instance Endpoint","text":"

    vault_address = \"https://vault.aws.binbash.co:8200\"

    "},{"location":"user-guide/ref-architecture-aws/features/identities/credentials-vault/#hcp-vault-private-api-endpoint","title":"HCP Vault private API endpoint","text":"

    vault_address = \"https://bb-le-shared-vault-cluster.private.vault.xxxxxxxxxx.aws.hashicorp.cloud:8200\"

    "},{"location":"user-guide/ref-architecture-aws/features/identities/credentials-vault/#hashicorp-vault-token","title":"Hashicorp Vault token","text":"

    We'll need to set up this Vault auth token in our [/config/common.config] file whenever we run the Terraform Leverage Reference Architecture for:

    Vault token generation and authentication

    This is the Vault token that will be used by Terraform, or by the vault CLI, to perform calls to the Vault API. During the initial setup, you will have to use a root token. If you are using a self-hosted installation you will get such a token after you initialize Vault; if you are using Hashicorp Cloud Platform you can get the token from the HCP Admin UI.

    After the initial setup, and since we recommend integrating Vault to Github for authentication, you will have to follow these steps:

    1. Generate a GitHub Personal Access Token: https://github.com/settings/tokens
    2. Click "Generate new token"
    3. Under scopes, only select \"read:org\", under \"admin:org\"
    "},{"location":"user-guide/ref-architecture-aws/features/identities/credentials-vault/#get-vault-token-from-your-gh-auth-token","title":"Get vault token from your GH auth token","text":"
    1. Run the vault CLI via docker: docker run -it vault:1.7.2 sh
    2. Set up the Vault ENV vars (NOTE: this will change a little bit between an AWS self-hosted vs an HCP Vault deployment)
      export VAULT_ADDR="https://vault-cluster.private.vault.XXXXXX.aws.hashicorp.cloud:8200"; \
      export VAULT_NAMESPACE="admin"
    3. vault login -method=github
      $ docker run -it vault:1.7.2 sh
      / # export VAULT_ADDR="https://bb-le-shared-vault-cluster.private.vault.xxxxxxx.aws.hashicorp.cloud:8200"; export VAULT_NAMESPACE="admin"

      / # vault login -method=github
      GitHub Personal Access Token (will be hidden):
      Success! You are now authenticated. The token information displayed below
      is already stored in the token helper. You do NOT need to run "vault login"
      again. Future Vault requests will automatically use this token.

      Key                    Value
      ---                    -----
      token                  s.PNAXXXXXXXXXXXXXXXXXXX.hbtct
      token_accessor         KTqKKXXXXXXXXXXXXXXXXXXX.hbtct
      token_duration         1h
      ...
    4. Input your GH personal access token when prompted
    5. Set the token returned in step 4 into /config/common.config -> vault_token="s.PNAXXXXXXXXXXXXXXXXXXX.hbtct" (see the example below)

    NOTE: the admin token from https://portal.cloud.hashicorp.com/ will always work, but its use is discouraged in favour of the nominated GH personal access token, for security audit trail reasons.
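    For reference, once the token has been obtained, the Vault-related entries in /config/common.config would end up looking something like this (placeholder values taken from the examples above; your endpoint and token will differ):

    # /config/common.config (excerpt, placeholder values)
    vault_address = "https://bb-le-shared-vault-cluster.private.vault.xxxxxxxxxx.aws.hashicorp.cloud:8200"
    vault_token   = "s.PNAXXXXXXXXXXXXXXXXXXX.hbtct"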

    "},{"location":"user-guide/ref-architecture-aws/features/identities/credentials-vault/#hashicorp-vault-ui-auth","title":"Hashicorp Vault UI Auth","text":"

    You can also manage your Vault instance via its UI. The screenshot below shows an example using the GitHub personal access token, one of our supported auth methods.

    1. Generate a GitHub Personal Access Token: https://github.com/settings/tokens
    2. Click "Generate new token"
    3. Under scopes, only select \"read:org\", under \"admin:org\"

    Open your preferred web browser, choose the GitHub auth method, paste your GH token, and you'll be able to log in to your instance.

    Figure: Vault HCP UI user authentication screen. (Source: binbash Leverage, "Leverage Vault Reference Architecture auth method", binbash Leverage Doc, accessed August 5th 2021)."},{"location":"user-guide/ref-architecture-aws/features/identities/credentials/","title":"AWS Credentials","text":"

    Three main sets of credentials are used to interact with the AWS environment. We call them the bootstrap, management and security credentials.

    "},{"location":"user-guide/ref-architecture-aws/features/identities/credentials/#bootstrap-credentials","title":"bootstrap credentials","text":"

    These are temporary credentials used for the initial deployment of the architecture, and they should only be used for this purpose. Once this process is finished, management and security users should be the ones managing the environment.

    "},{"location":"user-guide/ref-architecture-aws/features/identities/credentials/#management-credentials","title":"management credentials","text":"

    management credentials are meant to be used for all important administrative tasks in the environment (e.g. billing adjustments). They should be tied to a physical user in your organization.

    A user with these credentials will assume the role OrganizationAccountAccessRole when interacting with the environment.

    "},{"location":"user-guide/ref-architecture-aws/features/identities/credentials/#security-credentials","title":"security credentials","text":"

    These credentials are the ones to be used for everyday maintenance and interaction with the environment. Users in the role of DevOps | SecOps | Cloud Engineer in your organization should use these credentials.

    A user with these credentials will assume the role DevOps when interacting with the environment.

    "},{"location":"user-guide/ref-architecture-aws/features/identities/credentials/#read-more","title":"Read More","text":"

    AWS reference links

    Consider the following AWS official links as reference:

    "},{"location":"user-guide/ref-architecture-aws/features/identities/gpg/","title":"GPG Keys","text":""},{"location":"user-guide/ref-architecture-aws/features/identities/gpg/#why-do-we-use-gpg-keys","title":"Why do we use GPG keys?","text":"

    By default, our Leverage Reference Architecture base-identities layer approach is to use the IAM module to manage AWS IAM Users' credentials with encryption to ensure strong security.

    This module outputs commands and GPG-encrypted messages which can be decrypted from the command line to obtain the AWS Web Console user's password and the user's secret access key.

    Notes for keybase users

    If possible, always use GPG encryption to prevent Terraform from keeping the unencrypted password and secret access key in the state file.

    Keybase pre-requisites

    When gpg_key is specified as keybase:username, make sure that the user public key has already been uploaded to the Reference Architecture base-identities layer keys folder

    "},{"location":"user-guide/ref-architecture-aws/features/identities/gpg/#managing-your-gpg-keys","title":"Managing your GPG keys","text":"

    Create a key pair

    Delete a key pair

    Export your public key

    Decrypt your encrypted password

    1. The user should copy the encrypted password from whatever medium it was provided through
    2. Run echo "YOUR ENCRYPTED STRING PASSWORD HERE" | base64 --decode > a_file_with_your_pass
      $ echo \"wcBMA/ujy1wF7UPcAQgASLL/x8zz7OHIP+EHU7IAZfa1A9qD9ScP5orK1M473WlXVgPrded0iHpyZRwsJRS8Xe38AHZ65O6CnywdR522MbD\\\nRD6Yz+Bfc9NwO316bfSoTpyROXvMi+cfMEcihInHaCIP9YWBaI3eJ6VFdn90g9of00HYehBux7E2VitMuWo+v46W1p8/pw0b0H5qcppnUYYOjjSbjzzAuMF\\\nyNB5M1K8av61bPQPQTxBH3SFaM0B4RNmUl1bHKDIcdESYyIP/PRLQ45Rs5MzGgALIxBy24qdPNjHJQR48/5QV4nzB9qeEe4eWDB4ynSEfLsXggiz8fsbajV\\\ngSLNsdpqP9lYaueFdLgAeR6a+EjvqZfq0hZAgoiymsb4Qtn4A7gmeGmNeDE4td1mVfgzuTZ9zhnSbAYlXNIiM4b0MeX4HrjFkT/Aq+A/rvgBeKhszWD4Ibh\\\nA4PgC+QPiJRb5kQ/mX8DheQfAHJ24iUZk1jh6AsA\" | base64 --decode > encrypted_pass\n
    3. Run gpg --decrypt a_file_with_your_pass (in the path where you executed step 2) to decrypt your password using your gpg key and its passphrase
      $ gpg --decrypt encrypted_pass

      You need a passphrase to unlock the secret key for
      user: "Demo User (AWS org project-user acct gpg key w/ passphrase) <username.lastname@domain.com>"
      2048-bit RSA key, ID 05ED43DC, created 2019-03-15 (main key ID D64DD59F)

      gpg: encrypted with 2048-bit RSA key, ID 05ED43DC, created 2019-03-15
            "Demo User (AWS org project-user acct gpg key w/ passphrase) <username.lastname@domain.com>"
      Vi0JA|c%fP*FhL}CE-D7ssp_TVGlf#%
      Depending on your shell version, an extra % character could appear at the end of the output, as shown above; disregard this character since it's not part of the initial (one-time) AWS Web Console password.
    4. If all went well, the decrypted password should be there (a condensed GnuPG reference session is sketched below)
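    Tying this section together, a condensed GnuPG reference session covering key creation, public key export and password decryption might look as follows; the commands are standard GnuPG, but exact prompts and defaults vary with your gpg version:

    # Create a key pair (follow the interactive prompts)
    gpg --gen-key

    # Export your public key, base64-encoded, to upload it to the base-identities keys folder
    gpg --export "Your Name" | base64

    # Decrypt the one-time AWS Web Console password you received
    echo "YOUR ENCRYPTED STRING PASSWORD HERE" | base64 --decode > encrypted_pass
    gpg --decrypt encrypted_pass

    # If a key pair must be removed, delete the secret key first, then the public key
    gpg --delete-secret-keys "Your Name"
    gpg --delete-keys "Your Name"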
    "},{"location":"user-guide/ref-architecture-aws/features/identities/gpg/#workaround-for-mac-users","title":"Workaround for Mac users","text":"

    There are some situations where gpg keys generated on Mac don't work properly, generating errors like the following:

    ╷
    │ Error: error encrypting password during IAM User Login Profile (user.lastname) creation: Error encrypting Password: error parsing given PGP key: openpgp: unsupported feature: unsupported oid: 2b060104019755010501
    │
    │   with module.user["user.lastname"].aws_iam_user_login_profile.this[0],
    │   on .terraform/modules/user/modules/iam-user/main.tf line 12, in resource "aws_iam_user_login_profile" "this":
    │   12: resource "aws_iam_user_login_profile" "this" {
    │

    Docker is required for this workaround.

    If you don't have docker on your PC, don't worry. You can easily install it following the steps on the official page.

    In these cases, execute the following steps:

    1. Run an interactive console into an ubuntu container mounting your gpg directory.

      docker run --rm -it --mount type=bind,src=/Users/username/.gnupg,dst=/root/.gnupg ubuntu:latest

    2. Inside the container, install required packages.

      apt update
      apt install gnupg

    3. Generate the key as described in previous sections, running gpg --gen-key at the interactive console in the ubuntu container.

    4. To fix permissions in your gpg directory, run these commands at the interactive console in the ubuntu container.

      find ~/.gnupg -type f -exec chmod 600 {} \;
      find ~/.gnupg -type d -exec chmod 700 {} \;

    5. Now you should be able to export the gpg key and decode the password from your mac, running gpg --export \"Your Name\" | base64.

    6. Finally, decrypt the password in your mac, executing:

      echo \"YOUR ENCRYPTED STRING PASSWORD HERE\" | base64 --decode > a_file_with_your_pass\ngpg --decrypt a_file_with_your_pass\n

    "},{"location":"user-guide/ref-architecture-aws/features/identities/identities/","title":"Identity and Access Management (IAM) Layer","text":""},{"location":"user-guide/ref-architecture-aws/features/identities/identities/#setting-up-user-credentials","title":"Setting up user credentials","text":"

    Please follow the steps below to orchestrate your base-identities layer, first in your project-root AWS account and afterwards in your project-security account.

    IAM user standard creation workflow

    1. Pre-requisite add Public PGP Key following the documentation
    2. For steps 3. and 4. consider following Leverage's Terraform workflow
    3. Update (add | remove) your IAM Users associated code and deploy security/global/base-identities/users.tf
      • Consider customizing your account Alias and Password Policy
    4. Update (add | remove | edit) your IAM Groups associated code and deploy security/global/base-identities/groups.tf
    5. Get and share each IAM User's AWS Console user id and their associated one-time password (OTP) from the make apply outputs
      • temporarily set sensitive = false to get the encrypted outputs in your prompt output.
    6. Each user will need to decrypt their AWS Console password; you can share the associated documentation with them.
    7. Users must log in to the AWS Web Console (https://project-security.signin.aws.amazon.com/console) with their decrypted password and create a new one
    8. Activate MFA for Web Console (Optional but strongly recommended)
    9. Each user should create their AWS access keys if needed
    10. Users can optionally set up ~/.aws/project/credentials + ~/.aws/project/config following the AWS Credentials Setup sub-section immediately below (a minimal sketch is shown after this list)
    11. To allow users to access AWS Organization member accounts, consider repeating step 3 but for the corresponding member accounts:
      • shared/global/base-identities
      • apps-devstg/global/base-identities
      • app-prd/global/base-identities
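    As a reference for step 10, a minimal ~/.aws/project/credentials and ~/.aws/project/config pair could look like the following; profile names, account IDs, region and MFA serial are placeholders to adjust to your project naming:

    # ~/.aws/project/credentials (excerpt, placeholder values)
    [project-security]
    aws_access_key_id     = AKIAXXXXXXXXXXXXXXXX
    aws_secret_access_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # ~/.aws/project/config (excerpt, placeholder values)
    [profile project-shared-devops]
    role_arn       = arn:aws:iam::222222222222:role/DevOps
    source_profile = project-security
    mfa_serial     = arn:aws:iam::111111111111:mfa/username
    region         = us-east-1
    output         = json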
    "},{"location":"user-guide/ref-architecture-aws/features/identities/identities/#recommended-post-tasks","title":"Recommended Post-tasks","text":"

    Deactivating AWS STS in AWS Regions that are not in use

    When you activate STS endpoints for a Region, AWS STS can issue temporary credentials to users and roles in your account that make an AWS STS request. Those credentials can then be used in any Region that is enabled by default or is manually enabled. You must activate the Region in the account where the temporary credentials are generated. It does not matter whether a user is signed into the same account or a different account when they make the request.

    To activate or deactivate AWS STS in a Region that is enabled by default (console)

    1. Sign in as a root user or an IAM user with permissions to perform IAM administration tasks.
    2. Open the IAM console and in the navigation pane choose Account settings.
    3. If necessary, expand Security Token Service (STS), find the Region that you want to activate, and then choose Activate or Deactivate. For Regions that must be enabled, we activate STS automatically when you enable the Region. After you enable a Region, AWS STS is always active for the Region and you cannot deactivate it. To learn how to enable a Region, see Managing AWS Regions in the AWS General Reference.

    Source | AWS Documentation IAM User Guide | Activating and deactivating AWS STS in an AWS Region

    Figure: Deactivating AWS STS in AWS Regions that are not in use. Only Regions in use should have STS activated.

    "},{"location":"user-guide/ref-architecture-aws/features/identities/identities/#next-steps","title":"Next Steps","text":"

    Setup your AWS Credentials

    "},{"location":"user-guide/ref-architecture-aws/features/identities/overview/","title":"Identity and Access Management (IAM)","text":""},{"location":"user-guide/ref-architecture-aws/features/identities/overview/#overview","title":"Overview","text":"

    Taking this official AWS resource as a reference, we've defined a security account structure for managing multiple accounts.

    User Management Definitions

    Why multi account IAM strategy?

    Creating a security relationship between accounts makes it even easier for companies to assess the security of AWS-based deployments, centralize security monitoring and management, manage identity and access, and provide audit and compliance monitoring services

    Figure: AWS Organization Security account structure for managing multiple accounts (just as reference). (Source: Yoriyasu Yano, "How to Build an End to End Production-Grade Architecture on AWS Part 2", Gruntwork.io Blog, accessed November 18th 2020)."},{"location":"user-guide/ref-architecture-aws/features/identities/overview/#iam-groups-roles-definition","title":"IAM Groups & Roles definition","text":"

    AWS Org member accounts IAM groups :

    Account Name AWS Org Member Accounts IAM Groups Admin Auditor DevOps DeployMaster project-management x project-security x x x x

    AWS Org member accounts IAM roles :

    Account Name AWS Org Member Accounts IAM Roles Admin Auditor DevOps DeployMaster OrganizationAccountAccessRole project-management x project-security x x x x project-shared x x x x x project-legacy x x x project-apps-devstg x x x x x project-apps-prd x x x x x"},{"location":"user-guide/ref-architecture-aws/features/identities/roles/","title":"IAM Roles","text":"

    What are AWS IAM Roles?

    For the Leverage AWS Reference Architecture we heavily depend on AWS IAM roles. An IAM role is a standalone IAM entity that:

    The two most common use cases for IAM roles are

    "},{"location":"user-guide/ref-architecture-aws/features/identities/roles/#how-iam-roles-work","title":"How IAM roles work?","text":"Figure: Example of AWS cross-account AWS access. (Source: Kai Zhao, \"AWS CloudTrail Now Tracks Cross-Account Activity to Its Origin\", AWS Security Blog, accessed November 17th 2020).

    Main IAM Roles related entities

    "},{"location":"user-guide/ref-architecture-aws/features/identities/roles/#iam-policies","title":"IAM policies","text":"

    Just as you can attach IAM policies to an IAM user and IAM group, you can attach IAM policies to an IAM role.

    "},{"location":"user-guide/ref-architecture-aws/features/identities/roles/#trust-policy","title":"Trust policy","text":"

    You must define a trust policy for each IAM role, which is a JSON document (very similar to an IAM policy) that specifies who can assume this IAM role. For example, we present below a trust policy that allows this IAM role to be assumed by an IAM user named John in AWS account 111111111111:

    {\n\"Version\": \"2012-10-17\",\n\"Statement\": [\n{\n\"Effect\": \"Allow\",\n\"Action\": \"sts:AssumeRole\",\n\"Principal\": {\"AWS\": \"arn:aws:iam::111111111111:user/John\"}\n}\n]\n}\n
    Note that a trust policy alone does NOT automatically give John permission to assume this IAM role. Cross-account access always requires permissions in both accounts (two-way authorization). So, if John is in AWS account A (111111111111) and you want him to have access to an IAM role called DevOps in account B (222222222222), then you need to configure permissions in both accounts: 1. In account B (222222222222), the DevOps IAM role must have a trust policy that gives sts:AssumeRole permissions to AWS account A (111111111111), as shown above. 2. In account A (111111111111), you also need to attach an IAM policy to John's IAM user that allows him to assume the DevOps IAM role, which might look like this:

    {\n\"Version\": \"2012-10-17\",\n\"Statement\": [\n{\n\"Effect\": \"Allow\",\n\"Action\": \"sts:AssumeRole\",\n\"Resource\": \"arn:aws:iam::222222222222:role/DevOps\"\n}\n]\n}\n
    "},{"location":"user-guide/ref-architecture-aws/features/identities/roles/#assuming-an-aws-iam-role","title":"Assuming an AWS IAM role","text":"

    How does it work?

    IAM roles do not have a user name, password, or permanent access keys. To use an IAM role, you must assume it by making an AssumeRole API call (via the SDKs, the CLI or the Web Console), which will return temporary access keys you can use in follow-up API calls to authenticate as the IAM role. The temporary access keys will be valid for 1-12 hours (depending on your current validity expiration config), after which you must call AssumeRole again to fetch new temporary keys. Note that to make the AssumeRole API call, you must first authenticate to AWS using some other mechanism (a CLI sketch of this workflow is shown after the steps below).

    For example, for an IAM user to assume an IAM role, the workflow looks like this:

    Figure: Assuming an AWS IAM role. (Source: Gruntwork.io, "How to configure a production-grade AWS account structure using Gruntwork AWS Landing Zone", Gruntwork.io Production deployment guides, accessed November 17th 2020).

    Basic AssumeRole workflow

    1. Authenticate using the IAM user's permanent AWS access keys
    2. Make the AssumeRole API call
    3. AWS sends back temporary access keys
    4. You authenticate using those temporary access keys
    5. Now all of your subsequent API calls will be on behalf of the assumed IAM role, with access to whatever permissions are attached to that role
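    For reference only, the same five steps with the AWS CLI might look like this; the role ARN follows the example above, and the profile, session name and returned keys are placeholders:

    # 1-2. Authenticate with the IAM user's permanent keys and request temporary credentials
    aws sts assume-role \
        --role-arn arn:aws:iam::222222222222:role/DevOps \
        --role-session-name john-devops \
        --profile project-security

    # 3-4. Export the temporary keys returned in the Credentials block of the response
    export AWS_ACCESS_KEY_ID=ASIAXXXXXXXXXXXXXXXX
    export AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    export AWS_SESSION_TOKEN=xxxxxxxxxxxxxxxx

    # 5. Subsequent calls now run on behalf of the assumed DevOps role
    aws sts get-caller-identity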

    IAM roles and AWS services

    Most AWS services have native support built-in for assuming IAM roles.

    For example:

    "},{"location":"user-guide/ref-architecture-aws/features/identities/roles/#read-more","title":"Read more","text":"

    AWS reference links

    Consider the following AWS official links as reference:

    "},{"location":"user-guide/ref-architecture-aws/features/monitoring/apm/","title":"Application Performance Monitoring (APM) and Business Performance","text":"

    Using a custom Prometheus Blackbox Exporter + Grafana together with Elastic APM, application performance monitoring (APM) delivers real-time and trending data about your web application's performance and the level of satisfaction that your end users experience. With end-to-end transaction tracing and a variety of color-coded charts and reports, APM visualizes your data, down to the deepest code levels. Your DevOps teams don't need to guess whether a performance blocker comes from the app itself, CPU availability, database loads, or something else entirely unexpected. With APM, you can quickly identify potential problems before they affect your end users.

    APM's user interface provides both current and historical information about memory usage, CPU utilization, database query performance, web browser rendering performance, app availability and error analysis, external services, and other useful metrics.

    "},{"location":"user-guide/ref-architecture-aws/features/monitoring/apm/#slis-kpis","title":"SLIs / KPIs","text":"

    Service Level Indicators (SLIs)

    KPI for business performance

    KPI for app and infrastructure teams

    "},{"location":"user-guide/ref-architecture-aws/features/monitoring/apm/#read-more","title":"Read More","text":""},{"location":"user-guide/ref-architecture-aws/features/monitoring/logs/","title":"Logs","text":""},{"location":"user-guide/ref-architecture-aws/features/monitoring/logs/#overview","title":"Overview","text":"

    Centralized Logs Solution

    For this purpose we propose the usage of Elasticsearch + Kibana, as the log store and visualization tool respectively. By deploying the Fluentd daemonset on the Kubernetes clusters we can send all logs from running pods to Elasticsearch, and with Beats we can send specific logs for resources outside of Kubernetes. There will be many components across the environment generating different types of logs: ALB access logs, S3 access logs, CloudFront access logs, application request logs, application error logs. Access logs for AWS-based resources can be stored in a centralized bucket for that purpose in the security account, and these can be streamed to Elasticsearch as well if needed.

    Figure: Monitoring metrics and log architecture diagram (just as reference). (Source: binbash Leverage, "AWS Well Architected Reliability Report example", binbash Leverage Doc, accessed November 18th 2020).

    Alerting based on Logs

    Certain features that were previously only available under licence were recently made open source by Elastic and included in the Elasticsearch open source project. ElastAlert allows us to generate alerts based on certain log entries, or even after counting a certain number of entries of a given type, providing great flexibility.

    --

    "},{"location":"user-guide/ref-architecture-aws/features/monitoring/logs/#alternatives-comparison-table","title":"Alternatives Comparison Table","text":"

    Leverage Confluence Documentation

    You'll find here a detailed comparison table between a self-hosted EC2 deployment and the AWS Elasticsearch (Elastic-Kibana) stack.

    "},{"location":"user-guide/ref-architecture-aws/features/monitoring/metrics/","title":"Metrics","text":"

    There are metrics that are going to be of interest both at the infrastructure level (CPU, memory, disk) and at the application level (number of non-200 responses, latency, % of errors), and we will have two key sources for this: Prometheus and AWS CloudWatch metrics.

    Metric collectors

    Figure: Monitoring metrics and log architecture diagram (just as reference). (Source: binbash Leverage, "AWS Well Architected Reliability Report example", binbash Leverage Doc, accessed November 18th 2020).

    Graphing metrics

    Grafana is the standard open source visualization tool, which can be used on top of a variety of different data stores. It can use Prometheus as a source, and there are many open source dashboards and plugins available that provide great visualization of how things are running; we can also build our own if necessary. If something is left out of Prometheus but already available in CloudWatch metrics, we can easily integrate it as a source for Grafana as well, and build dashboards that combine metrics from multiple origins and even apply some intelligence on top of them.

    Figure: Grafana K8s cluster metrics monitoring dashboard reference screenshot. (Source: DevOpsProdigy, "Grafana DevOpsProdigy KubeGraf Plugin", Grafana plugins, accessed November 18th 2020).


    Alerting based on metrics

    Although Grafana already has alerting capabilities built in, most of the time we prefer to configure the Prometheus alerting engine instead, because it lets us fully customize and fine-tune alerts, and keep them as code in an extremely readable syntax. Example:
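    As a minimal sketch only (not the actual rule used in the reference architecture; the metric names are standard node-exporter metrics and the threshold is illustrative), a rule behind an alert like the CriticalRamUsage one shown below could look like this:

    # node-memory-alerts.yml (illustrative Prometheus alerting rule file)
    groups:
      - name: node-memory
        rules:
          - alert: CriticalRamUsage
            expr: (1 - node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) * 100 > 90
            for: 5m
            labels:
              severity: critical
            annotations:
              summary: "High RAM usage on {{ $labels.instance }}"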

    Figure: Prometheus Alert Manager `CriticalRamUsage` alert screenshot (just as reference). (Source: binbash Leverage)."},{"location":"user-guide/ref-architecture-aws/features/monitoring/monitoring/","title":"SRE & Monitoring: Metrics, Logs & Tracing","text":""},{"location":"user-guide/ref-architecture-aws/features/monitoring/monitoring/#overview","title":"Overview","text":"

    There are two key approaches that we will cover with the proposed tools, Logs based monitoring and Metrics based monitoring.

    Monitoring tools

    Metrics: Prometheus - node-exporter - blackbox-exporter - alert-manager

    Metrics Dashboard: Grafana

    Centralized Logs: Elasticsearch-Fluent-Kibana (EFK)

    Distributed Tracing: Jaeger + OpenCensus

    "},{"location":"user-guide/ref-architecture-aws/features/monitoring/notification_escalation/","title":"Notification & Escalation Procedure","text":""},{"location":"user-guide/ref-architecture-aws/features/monitoring/notification_escalation/#overview","title":"Overview","text":"Urgency Service Notification Setting Use When Response High 24/7 High-priority PagerDuty Alert 24/7/365 diff --git a/user-guide/cookbooks/VPC-with-no-LandingZone/index.html b/user-guide/cookbooks/VPC-with-no-LandingZone/index.html index 6c7d7b82..0a9a669b 100644 --- a/user-guide/cookbooks/VPC-with-no-LandingZone/index.html +++ b/user-guide/cookbooks/VPC-with-no-LandingZone/index.html @@ -3983,6 +3983,8 @@ + + @@ -4286,6 +4288,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/user-guide/cookbooks/VPN-server/index.html b/user-guide/cookbooks/VPN-server/index.html new file mode 100644 index 00000000..17ff6e17 --- /dev/null +++ b/user-guide/cookbooks/VPN-server/index.html @@ -0,0 +1,5568 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + VPN Server - binbash Leverageâ„¢ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    How to create a VPN Server ¶

    +

    Goal ¶

    +

    To create a VPN server to access all the private networks (or at least those "peered" to the VPN one) in the Organization.

    +

    Assumptions ¶

    +

    We are assuming the binbash Leverage Landing Zone is deployed, the apps-devstg and shared accounts were created, and region us-east-1 is being used. In any case, you can adapt these examples to other scenarios.

    +
    +
    +

    How to ¶

    +

    As per binbash Leverage Landing Zone defaults, the VPN server will be created in a public network of the shared base-network VPC.

    +

    It is a "Pritunl" server.

    +

    All the networks that should be accessible from the VPN must:

    +
      +
    • be "peered" to the shared base-network VPC
    • +
    • their CIDRs have to be added to the "Pritunl" server
    • +
    +

    This Pritunl server will be deployed in an EC2 instance.

    +

    Note this instance can be started/stopped in a scheduled fashion, see here for more info. (Note also that, if no EIP is being used, the public IP will change when the instance is stopped and started again.)

    +

    These are the steps:

    +
      +
    • create the EC2 instance
    • +
    • deploy Pritunl
    • +
    • configure Pritunl
    • +
    +

    Create the EC2 ¶

    +

    Copy the layer ¶

    +

    A few methods can be used to download the VPN Server layer directory into the binbash Leverage project.

    +

    E.g. this addon is a nice way to do it.

    +

    Paste this layer into the account/region chosen to host this, e.g. shared/us-east-1/, so the final layer is shared/us-east-1/tools-vpn-server/.
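    For example, assuming you already have a local copy of the reference architecture checked out (the /tmp path below is just an assumption), the copy could be done like this from the root of your own project repository:

    # Paths are illustrative; adjust them to where your local copy of the reference architecture lives
    mkdir -p shared/us-east-1
    cp -r /tmp/ref-architecture/shared/us-east-1/tools-vpn-server shared/us-east-1/tools-vpn-server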

    +
    +

    Info

    +

    As usual when copying a layer this way, remove the file common-variables.tf and soft-link it to your project level one. E.g. rm common-variables.tf && ln -s ../../../config/common-variables.tf common-variables.tf.

    +
    +

    Update the layer ¶

    +

    Change it as per your needs. At a minimum, change the S3 backend key in the config.tf file, and in the ec2.tf file update the objects dns_records_public_hosted_zone and dns_records_internal_hosted_zone with your own domain.

    +

    Also, temporarily allow access to port 22 (SSH) from the Internet, so we can access the instance with Ansible.

    +

    To do this, in the ec2.tf file change this:

    +
        {
    +      from_port = 22, # SSH
    +      to_port   = 22,
    +      protocol  = "tcp",
    +      #cidr_blocks = ["0.0.0.0/0"],
    +      cidr_blocks = [data.terraform_remote_state.vpc.outputs.vpc_cidr_block],
    +      description = "Allow SSH"
    +    },
    +
    +

    ...to this:

    +
        {
    +      from_port = 22, # SSH
    +      to_port   = 22,
    +      protocol  = "tcp",
    +      cidr_blocks = ["0.0.0.0/0"],
    +      #cidr_blocks = [data.terraform_remote_state.vpc.outputs.vpc_cidr_block],
    +      description = "Allow SSH"
    +    },
    +
    +

    ...and this:

    +
      /*  dns_records_public_hosted_zone = [{
    +    zone_id = data.terraform_remote_state.dns.outputs.aws_public_zone_id[0],
    +    name    = "vpn.aws.binbash.co",
    +    type    = "A",
    +    ttl     = 300
    +  }]*/
    +
    +

    ...to this:

    +
      dns_records_public_hosted_zone = [{
    +    zone_id = data.terraform_remote_state.dns.outputs.public_zone_id,
    +    name    = "vpn.binbash.co",
    +    type    = "A",
    +    ttl     = 300
    +  }]
    +
    +

    Also, to allow access on port 443, this needs to be changed from this:

    +
        {
    +      from_port = 443, # Pritunl VPN Server UI
    +      to_port   = 443,
    +      protocol  = "tcp",
    +      #cidr_blocks = ["0.0.0.0/0"], # Public temporally accessible for new users setup (when needed)
    +      cidr_blocks = [data.terraform_remote_state.vpc.outputs.vpc_cidr_block],
    +      description = "Allow Pritunl HTTPS UI"
    +    },
    +
    +

    ...to this:

    +
        {
    +      from_port = 443, # Pritunl VPN Server UI
    +      to_port   = 443,
    +      protocol  = "tcp",
    +      cidr_blocks = ["0.0.0.0/0"], # Public temporally accessible for new users setup (when needed)
    +      #cidr_blocks = [data.terraform_remote_state.vpc.outputs.vpc_cidr_block],
    +      description = "Allow Pritunl HTTPS UI"
    +    },
    +
    +
    +

    Info

    +

    Replace the domain with your own. A public record is being created so we can reach the server.

    +
    +

    Apply the layer ¶

    +

    As usual:

    +
    leverage tf init 
    +leverage tf apply
    +
    +

    Deploy Pritunl ¶

    +

    SSH connection ¶

    +

    To do this, SSH access is needed. For this, a new key pair will be created to allow you to run Ansible.

    +

    Create a key pair (more here), or use an already created one.
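    If you need to create one, a minimal sketch (the key file name and comment are up to you):

    # Generate an ed25519 key pair; choose a passphrase when prompted
    ssh-keygen -t ed25519 -C "binbhash-AWS-instances" -f ~/.ssh/pritunl-vpn

    # Print the public key so you can copy it in the next steps
    cat ~/.ssh/pritunl-vpn.pub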

    +

    Store the keys in a safe place.

    +

    Copy the content of the public key.

    +

    Access the EC2 instance from the AWS Web Console (use the shared account).

    +

    Connect to the instance (connect button) using SSM (Session Manager).

    +

    Once in the instance terminal, paste the copied content into the authorized_keys file:

    +
    sudo bash -c 'echo "ssh-ed25519 yourkey binbhash-AWS-instances" >> /home/ubuntu/.ssh/authorized_keys'
    +
    +

    Replace the content between double quotes with your public key content

    +

    Check the connection:

    +
    ssh -i <path-2-your-private-key> ubuntu@<the-instance-public-ip>
    +
    +

    When prompted to accept the fingerprint, type "yes" and hit enter.

    +
    +

    Info

    +

    ubuntu is the default user for the default image used here; change it if needed.

    +
    +
    +

    Info

    +

    In this command you can also use the public URL that was set, in this example vpn.binbash.co.

    +
    +

    Run Ansible ¶

    +
    +

    Info

    +

    It seems to be obvious but... you need Ansible installed.

    +
    +

    This Ansible repo will be used here: Pritunl VPN Server Playbook

    +
    +

    Note

    +

    This is a private repository, please get in touch with us to get access to it!

    + +
    +

    Copy the playbooks into your project repository. (E.g. you can create an ansible directory inside your binbash Leverage project repository, so all your infrastructure code is in the same place.)

    +

    cd into the ansible-pritunl-vpn-server (or the name you've chosen) directory.

    +

    Follow the steps in the repository README file to install the server.
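    As a generic illustration only (the inventory and playbook file names below are hypothetical; the repository README documents the real ones), running a playbook against the instance looks like this:

    # Hypothetical file names; follow the repository README for the actual command
    ansible-playbook -i inventory/hosts.yml playbook.yml \
        --user ubuntu \
        --private-key ~/.ssh/pritunl-vpn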

    +

    Connect and configure the server ¶

    +

    ssh into the server and run this command:

    +
    sudo pritunl default-password
    +
    +

    Grab the user and password and use them as credentials in the web page at your public domain!

    +

    In the initial setup page, change the password and enter the domain in "Lets Encrypt Domain".

    +

    Hit Save.

    +
    A user and an organization ¶
    +

    First things first, add a user.

    +

    Go to Users.

    +

    Hit Add Organization.

    +

    Enter a name and hit Add.

    +

    Now Add User.

    +

    Enter a name, select the organization, enter an email and leave the PIN empty.

    +

    Hit Add.

    +
    A new server ¶
    +

    Now add a server to log into.

    +

    Go to Servers and hit "Add Server".

    +

    Enter the name, check "Enable Google Authenticator" and add it.

    +
    +

    Info

    +

    Note the Port and Protocol have to be in the range stated in the VPN Server layer, in the ec2.tf file under this block:

    {
    +  from_port   = 15255, # Pritunl VPN Server public UDP service ports -> pritunl.server.admin org
    +  to_port     = 15257, # Pritunl VPN Server public UDP service ports -> pritunl.server.devops org
    +  protocol    = "udp",
    +  cidr_blocks = ["0.0.0.0/0"],
    +  description = "Allow Pritunl Service"
    +}
    +

    +
    +

    Hit Attach Organization and attach the organization you've created.

    +

    Hit Attach.

    +

    Now hit Start Server.

    +
    A note on AWS private DNS ¶
    +

    To use a Route53 private zone (where your private addresses are set), these steps have to be followed:

    +
      +
    • Edit the server
    • +
    • In the "DNS Server" box (where 8.8.8.8 is set) add the internal DNS for the VPC
    • +
    • the internal DNS is x.x.x.2, e.g. if the VPC where your VPN Server lives is 172.18.0.0/16, then your DNS is 172.18.0.2
    • +
    • for the example, the final text is 172.18.0.2, 8.8.8.8 (note we are adding the 8.8.8.8 as a secondary DNS)
    • +
    • Add a specific route for the DNS server, for the example 172.18.0.2/32
    • +
    • Then add all the other routes you need to access your resources, e.g. to access the VPN Server's VPC this route must be added: 172.18.0.0/16
    • +
    +
    Use the user to log into the VPN ¶
    +

    Go to Users.

    +

    Click the chain icon (Temporary Profile Link) next to the user.

    +

    Copy the "Temporary url to view profile links, expires after 24 hours" link and send it to the user.

    +

    The user should open the link.

    +

    The user has to create an OTP with an app such as Authy, enter a PIN, copy the "Profile URI Link" and enter it in the "import > profile URI" in the Pritunl Client.

    +

    Start the VPN and enjoy being secure!

    +

    Set back security ¶

    +

    Set back all the configurations to access the server and apply the layer:

    +
        {
    +      from_port = 22, # SSH
    +      to_port   = 22,
    +      protocol  = "tcp",
    +      #cidr_blocks = ["0.0.0.0/0"],
    +      cidr_blocks = [data.terraform_remote_state.vpc.outputs.vpc_cidr_block],
    +      description = "Allow SSH"
    +    },
    +
    +  /*  dns_records_public_hosted_zone = [{
    +    zone_id = data.terraform_remote_state.dns.outputs.aws_public_zone_id[0],
    +    name    = "vpn.aws.binbash.co",
    +    type    = "A",
    +    ttl     = 300
    +  }]*/
    +
    +    {
    +      from_port = 443, # Pritunl VPN Server UI
    +      to_port   = 443,
    +      protocol  = "tcp",
    +      #cidr_blocks = ["0.0.0.0/0"], # Public temporally accessible for new users setup (when needed)
    +      cidr_blocks = [data.terraform_remote_state.vpc.outputs.vpc_cidr_block],
    +      description = "Allow Pritunl HTTPS UI"
    +    },
    +
    +

    Note about Routes ¶

    +

    When you create a Pritunl VPN server, a VPN network CIDR is used, let's say 192.168.122.0/24.

    +

    So, all the clients connecting to the VPN will be assigned with an IP in this range.

    +

    The VPN Server, at the same time, is living in a network, with its own IP in a given range, e.g. 10.20.0.0/16, and it has a public IP.

    +

    Clients will be connecting to the public IP and receiving a VPN IP.

    +

    Now, we need to route the traffic.

    +

    Let's say you have an internal network (to which the VPN Server has access) in the range 10.40.0.0/16.

    +

    If you want the VPN clients to reach this network, you must Add a Route to the VPN server.

    +

    Go to the Servers page.

    +

    Stop the server you want to add the route to.

    +

    Hit Add Route.

    +

    Fill in the CIDR of the network you want to reach (e.g. the 10.40.0.0/16 network from the example above), select the server name and hit Attach.

    +

    Start the server.

    +

    Also note the route 0.0.0.0/0 is added by default. This means all traffic goes through the VPN server.

    +

    You can remove this and allow just the internal CIDRs.

    +

    Lets Encrypt Domain ¶

    +
      +
    • must temporarily open port 80 to the world (line 52)
    • +
    • must temporarily open port 443 to the world (line 59)
    • +
    • must uncomment public DNS record block (lines 105-112)
    • +
    • make apply
    • +
    • connect to the VPN and ssh to the Pritunl EC2
    • +
    • run sudo pritunl reset-ssl-cert on the instance
    • +
    • force an SSL cert update (manually via the UI or via an API call); in the case of using the UI, set the "Lets Encrypt Domain" field with the VPN domain and click on Save
    • +
    • roll back the first three steps (ports 80 and 443, and the public DNS record block) and run make apply again
    • +
  • + + + + + VPN Server + + + + +
  • + + + + @@ -5649,13 +5671,13 @@

    References diff --git a/work-with-us/leverage-vs-competition/index.html b/work-with-us/leverage-vs-competition/index.html index 0ab2d004..e49c7bd2 100644 --- a/work-with-us/leverage-vs-competition/index.html +++ b/work-with-us/leverage-vs-competition/index.html @@ -6157,6 +6157,8 @@ + + @@ -6338,6 +6340,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/work-with-us/releases/releases-and-versions/index.html b/work-with-us/releases/releases-and-versions/index.html index a7e5c67a..03be0c65 100644 --- a/work-with-us/releases/releases-and-versions/index.html +++ b/work-with-us/releases/releases-and-versions/index.html @@ -3979,6 +3979,8 @@ + + @@ -4160,6 +4162,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/work-with-us/releases/versions-compatibility-matrix/index.html b/work-with-us/releases/versions-compatibility-matrix/index.html index 2af3d215..0b5e9423 100644 --- a/work-with-us/releases/versions-compatibility-matrix/index.html +++ b/work-with-us/releases/versions-compatibility-matrix/index.html @@ -3979,6 +3979,8 @@ + + @@ -4160,6 +4162,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/work-with-us/roadmap/leverage-cli/overview/index.html b/work-with-us/roadmap/leverage-cli/overview/index.html index 12bf0bd2..3a198e89 100644 --- a/work-with-us/roadmap/leverage-cli/overview/index.html +++ b/work-with-us/roadmap/leverage-cli/overview/index.html @@ -3979,6 +3979,8 @@ + + @@ -4160,6 +4162,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/work-with-us/roadmap/ref-arch/cost-optimization/index.html b/work-with-us/roadmap/ref-arch/cost-optimization/index.html index 2080190c..0031ffd9 100644 --- a/work-with-us/roadmap/ref-arch/cost-optimization/index.html +++ b/work-with-us/roadmap/ref-arch/cost-optimization/index.html @@ -3979,6 +3979,8 @@ + + @@ -4160,6 +4162,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/work-with-us/roadmap/ref-arch/demo-apps/index.html b/work-with-us/roadmap/ref-arch/demo-apps/index.html index 4a1cb5c6..47378543 100644 --- a/work-with-us/roadmap/ref-arch/demo-apps/index.html +++ b/work-with-us/roadmap/ref-arch/demo-apps/index.html @@ -3979,6 +3979,8 @@ + + @@ -4160,6 +4162,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/work-with-us/roadmap/ref-arch/operational-excellence/index.html b/work-with-us/roadmap/ref-arch/operational-excellence/index.html index e974cbe5..2a7f4536 100644 --- a/work-with-us/roadmap/ref-arch/operational-excellence/index.html +++ b/work-with-us/roadmap/ref-arch/operational-excellence/index.html @@ -3979,6 +3979,8 @@ + + @@ -4160,6 +4162,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/work-with-us/roadmap/ref-arch/overview/index.html b/work-with-us/roadmap/ref-arch/overview/index.html index 28a00ca8..f02cbe6b 100644 --- a/work-with-us/roadmap/ref-arch/overview/index.html +++ b/work-with-us/roadmap/ref-arch/overview/index.html @@ -3979,6 +3979,8 @@ + + @@ -4160,6 +4162,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/work-with-us/roadmap/ref-arch/reliability-performance/index.html b/work-with-us/roadmap/ref-arch/reliability-performance/index.html index 2e00a595..283a0fa8 100644 --- a/work-with-us/roadmap/ref-arch/reliability-performance/index.html +++ b/work-with-us/roadmap/ref-arch/reliability-performance/index.html @@ -3979,6 +3979,8 @@ + + @@ -4160,6 +4162,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/work-with-us/roadmap/ref-arch/security/index.html b/work-with-us/roadmap/ref-arch/security/index.html index f4242d23..ddb19fae 100644 --- a/work-with-us/roadmap/ref-arch/security/index.html +++ b/work-with-us/roadmap/ref-arch/security/index.html @@ -3979,6 +3979,8 @@ + + @@ -4160,6 +4162,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + + diff --git a/work-with-us/support/index.html b/work-with-us/support/index.html index 3f428084..c835fc03 100644 --- a/work-with-us/support/index.html +++ b/work-with-us/support/index.html @@ -3979,6 +3979,8 @@ + + @@ -4160,6 +4162,26 @@ + + + + + +
  • + + + + + VPN Server + + + + +
  • + + + +