diff --git a/.dockerignore b/.dockerignore index 4d9ca3e96db..513f9979366 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,6 +1,7 @@ * !playbooks/ !docker/build/* +!docker/devstack_common_ansible_overrides.yml !docker/build/*/*.yml docker/build/*/Dockerfile !docker/plays/ diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index ad646cbc65e..322bf24846b 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -3,9 +3,10 @@ Configuration Pull Request Make sure that the following steps are done before merging: - - [ ] A DevOps team member has approved the PR. + - [ ] A DevOps team member has approved the PR if it is code shared across multiple services and you don't own all of the services. - [ ] Are you adding any new default values that need to be overridden when this change goes live? If so: - [ ] Update the appropriate internal repo (be sure to update for all our environments) - [ ] If you are updating a secure value rather than an internal one, file a DEVOPS ticket with details. - [ ] Add an entry to the CHANGELOG. - [ ] If you are making a complicated change, have you performed the proper testing specified on the [Ops Ansible Testing Checklist](https://openedx.atlassian.net/wiki/display/EdxOps/Ops+Ansible+Testing+Checklist)? Adding a new variable does not require the full list (although testing on a sandbox is a great idea to ensure it links with your downstream code changes). + - [ ] Think about how this change will affect Open edX operators. Have you updated the wiki page for the next Open edX release? 
diff --git a/.travis.yml b/.travis.yml index 6dab193d79d..0a2fd073f3d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,13 +1,16 @@ # Travis CI configuration file for running tests --- language: python +dist: xenial python: - - "2.7" + - 3.5 + - 3.8 branches: only: - master - appsembler/hawthorn/master + - appsembler/juniper/master services: - docker @@ -16,27 +19,23 @@ addons: apt: packages: - nodejs - - python-demjson before_install: - sudo apt-get -y update - sudo apt-get -y install -o Dpkg::Options::="--force-confold" docker-ce install: - - "pip install -r requirements.txt" - "pip install demjson" + - "pip install -r requirements.txt" env: - MAKE_TARGET=test.syntax SHARD=0 SHARDS=1 - MAKE_TARGET=test.playbooks SHARD=0 SHARDS=1 - - MAKE_TARGET=docker.test.shard SHARD=0 SHARDS=3 - - MAKE_TARGET=docker.test.shard SHARD=1 SHARDS=3 - - MAKE_TARGET=docker.test.shard SHARD=2 SHARDS=3 script: - docker --version - make --version - - travis_wait 50 make --keep-going $MAKE_TARGET SHARD=$SHARD SHARDS=$SHARDS + - travis_wait 90 make --keep-going $MAKE_TARGET SHARD=$SHARD SHARDS=$SHARDS # Appsembler addition to automatically push # our hawthorn image @@ -46,3 +45,7 @@ after_success: travis_wait 50 docker build -f docker/build/edxapp/Dockerfile . -t appsembler/edxapp:latest ; docker push appsembler/edxapp:latest ; fi + +matrix: + allow_failures: + - python: 3.8 diff --git a/AUTHORS b/AUTHORS index f7ab1dab5aa..03e5648d2a0 100755 --- a/AUTHORS +++ b/AUTHORS @@ -58,3 +58,6 @@ Jillian Vogel Zubair Afzal Kyle McCormick Muzaffar Yousaf +Dave St.Germain +Rabia Iftikhar +Mostafa Hussein diff --git a/CHANGELOG.md b/CHANGELOG.md index 59569842b44..80855cf4846 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,13 +1,343 @@ -- Use Ansible 2.3.1.0 so that we can do shallow clones of tags. +# Changelog +All notable changes to this project will be documented in this file. +Add any new changes to the top(right below this line). 
+ +- Role: edxapp BREAKING_CHANGE + - The sandbox environment that runs instructor written python code used to run python 2.7. We update the default to + python 3.5 but provide a new variable to be able to go back to the old setting. If `edxapp_sandbox_python_version` + is set to `python2.7` you will get the behavior from before this change. If you are running a deployment where + there are instructor written python graded assignments, you will need to update that code before taking this + change. Or you will need to hold back the `edxapp_sandbox_python_version` until all instructor python code has been + updated. + +- Role: all + - Split the COMMON_SANDBOX_BUILD variable with its two components: SANDBOX_CONFIG and CONFIGURE_JWTS. + +- Role: edxapp + - enable paver autocomplete in docker devstack + +- Role: forum + - Added `FORUM_MONGO_AUTH_MECH` to allow the authentication mechanism to be configurable. + Must be set if user credentials are in the connection string, or use `""` if no user credentials required. + Defaults to `":scram"`, which is supported by Mongo>=3.0, because `":mongodb_cr"` is removed in Mongo>=4.0. + Use `":mongodb_cr"` for mongo 2.6. + +- Docker: edxapp + - Disable install of private requirements for docker devstack. + +- Roles: edx_django_service, registrar, enterprise_catalog + - Moved celery worker supervisor config files/scripts into edx_django_service + - Removed the following variables + - ENTERPRISE_CATALOG_WORKER_DEFAULT_STOPWAITSECS + - ENTERPRISE_CATALOG_CELERY_HEARTBEAT_ENABLED + - ENTERPRISE_CATALOG_WORKERS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING + - ENTERPRISE_CATALOG_NEWRELIC_WORKERS_APPNAME + - REGISTRAR_WORKER_DEFAULT_STOPWAITSECS + - REGISTRAR_CELERY_HEARTBEAT_ENABLED + - REGISTRAR_WORKERS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING + - REGISTRAR_NEWRELIC_WORKERS_APPNAME + +- Role: edxapp + - Added Stanford-developed Image Modal XBlock. + +- Role: edxapp + - Added Stanford-developed SQL Grader XBlock. 
+ +- Role: mount_ebs + - Added check for disk size, size is now a required parameter in variables volumes and MONGO_VOLUMES + - This is to prevent mounting the wrong volumes when AWS swaps the order + +- Role: all + - Removed OPENID settings + +- Role: all + - Removed all settings with OIDC in name + +- Role: edxapp + - Added `ENTERPRISE_LEARNER_PORTAL_HOSTNAME` env var for lms. + +- Role: ecommerce + - Added `ENTERPRISE_LEARNER_PORTAL_HOSTNAME` env var for ecommerce. + +- Role: edxapp + - Added Stanford-developed Free Text Response XBlock. + +- Role: edxapp + - Added Stanford-developed Submit-and-Compare XBlock. + +- Role: edxapp + - Added Stanford-developed Qualtrics and In-Video Quiz XBlocks. + +- Open edX + - Don't use AWS_GATHER_FACTS, it was only for tagging which we don't need. + +- Open edX + - The wrong version of xqueue was being installed, fixed. + +- Role: enterprise_catalog + - Added infrastructure to start up and deploy celery workers + +- Role: edxapp + - Added Celery worker `prefetch_optimization` option to allow switching from 'default' to 'fair' (only write to available worker processes) + +- Role: insights + - install libssl-dev, needed for mysqlclient + +- Role: insights + - add DOT config (deprecate DOP) + +- Open edX + - native.sh needed to uninstall pyyaml to proceed + +- Role: enterprise_catalog + - Create role + +- Role: blockstore + - Increased upload limit to 10M + +- Role: ecommerce + - Fixed paypal payment processor default configuration + +- Role: edxapp + - Added `ENABLE_PUBLISHER` for indicating that the publisher frontend service is in use + +- Role: discovery + - Added `ENABLE_PUBLISHER` for indicating that the publisher frontend service is in use + +- Role: edxapp + - Added `ENABLE_ENROLLMENT_RESET` feature flag for masters integration sandboxes + +- Role: conductor + - New role added to configure the conductor service + +- Role: jwt_signature + - Added role to inject JWT signing keys into application config, used from edxapp, 
worker, and registrar. + +- Playbook: masters_sandbox_update + - Create edx partner + +- Playbook: program_manager + - Added playbook to setup program-manager micro-frontend application on sandboxes + +- Role: program_manager + - Created the program-manager role for micro-frontend application to be setup + +- Role: registrar + - Set CSRF_TRUSTED_ORIGINS. + +- Role: registrar + - Set CORS_ORIGIN_WHITELIST. + +- Role: discovery + - Override DISCOVERY_MYSQL_REPLICA_HOST to `edx.devstack.mysql` in docker. + +- Playbook: masters_sandbox + - Include call to create_api_access_request + +- Role: discovery + - Add mysql replica settings to env config. + +- Role: common_vars + - Default `COMMON_JWT_PUBLIC_SIGNING_JWK_SET` to `''` + instead of `!!null`. Because of how this setting is handled, + `!!null` ends up rendering as the literal string `None` instead + of the value `null`, which causes JSON decoding to fail + wherever the default value is used (as `'None'` is not valid JSON). + By setting the default to a Falsy value like the + empty string, edx-drf-extensions does not attempt to JSON- + decode it. + +- Playbook: masters_sandbox + - Added playbook to setup user and api access + +- Role: registrar + - Changed `REGISTRAR_CELERY_ALWAYS_EAGER` default to `false`. + +- Role: registrar + - Added `REGISTRAR_CELERY_ALWAYS_EAGER` with default `True`. + - Injected above settings as environment variable for Registrar. + +- Role: xserver + - Remove xserver from sandbox builds. + +- Role: oauth_client_setup + - Ensure that created DOT applications have corresponding ApplicationAccess records with user_id scope. + +- Role: edx_notes_api + - Added `EDX_NOTES_API_HOSTNAME` to set a hostname for the edx-notes-api IDA. + +- Open edX + - Added `SANDBOX_ENABLE_NOTES` to enable/disable setting up the edx-notes-api IDA. + +- Role: registrar + - Add registrar to sandbox builds. 
+ +- Role: registrar + - Change default celery queue to `registrar.default`, explicitly set default exchange and routing key. + +- Role: designer + - Create role + +- Role: supervisor + - Add registrar to `pre_supervisor_checks.py` + +- Role: registrar + - Added `registrar-workers.conf.j2` + - Add task to generate `registrar-workers.conf` from `registrar-workers.conf.j2` + - Added `REGISTRAR_WORKERS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING` + - Added `REGISTRAR_WORKER_DEFAULT_STOPWAITSECS` + - Added `REGISTRAR_CELERY_HEARTBEAT_ENABLED` + - Added `REGISTRAR_NEWRELIC_WORKERS_APPNAME` + - Added `REGISTRAR_CELERY_WORKERS` + +- Role: registrar + - Added `REGISTRAR_CELERY_BROKER_TRANSPORT`. + - Added `REGISTRAR_CELERY_BROKER_USER`. + - Added `REGISTRAR_CELERY_BROKER_PASSWORD`. + - Added `REGISTRAR_CELERY_BROKER_HOSTNAME`. + - Added `REGISTRAR_CELERY_BROKER_VHOST`. + - Injected all above settings as environment variables for Registrar. + +- Role: registrar + - Added `REGISTRAR_API_ROOT` + - Modified `REGISTRAR_MEDIA_URL`. + +- Role: edx_django_service + - Added new overridable variable `edx_django_service_api_root` + +- Role: registrar + - Replaced `REGISTRAR_MEDIA_ROOT`. + - Added `REGISTRAR_MEDIA_STORAGE_BACKEND`. + +- Role: registrar + - Replaced `REGISTRAR_LMS_URL_ROOT` with `REGISTRAR_LMS_BASE_URL`. + - Replaced `REGISTRAR_DISCOVERY_API_URL` with `REGISTRAR_DISCOVERY_BASE_URL`. + +- Role: registrar + - Added `REGISTRAR_SEGMENT_KEY` for segment.io event tracking. + +- Role: registrar + - Added `REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_KEY` for oauth2. + - Added `REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_SECRET` for oauth2. + - Added `REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_KEY` for backend auth. + - Added `REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_SECRET` for backend auth. 
+ - Added `REGISTRAR_SERVICE_USER_EMAIL` to have a registrar service user on LMS + - Added `REGISTRAR_SERVICE_USER_NAME` to have a registrar service user on LMS + +- Role: registrar + - Create role + +- Role: edxapp + - Added ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS to allow for edx specific query params to be added for business marketing footer. + +- Role: edxapp + - Removed the OfficeMix XBlock (the service that it uses has been dead for months). + +- Role: edxapp + - Added 'SYSTEM_WIDE_ROLE_CLASSES' for use of edx-rbac roles in the jwt in the lms + +- Open edX + - Renamed edx_sandbox.yml to openedx_native.yml + +- Role: ecomworker + - Added `assignment_email` default template value in `SAILTHRU` config to send offer assignment emails. + +- Role: nginx + - Added CORS Access-Control-Allow-Origin for static assets. + - Replaced wildcard Access-Control-Allow-Origin header for fonts. Make sure you set EDXAPP_CORS_ORIGIN_WHITELIST to include all your domains. + +- Role: nginx + - Modified robots.txt.j2 to accept the Allow rule. + - Modified robots.txt.j2 to accept either a single string or a list of strings for agent, disallow, and allow. + +- Role: ecommerce + - Added CORS_ORIGIN_WHITELIST and CORS_URLS_REGEX to allow selective CORS whitelisting of origins/urls. + +- common_vars + - Added new overridable variable `COMMON_LMS_BASE_URL`. + +- Role: discovery + - Added `DISCOVERY_CORS_ORIGIN_WHITELIST` to allow CORS whitelisting of origins. + +- abbey.py + - Removed abbey.py + +- Role: edxapp + - Renamed proctoring backend setting to work with edx-proctoring 1.5.0 + - Render auth and env config to a single yml file + +- Role: edxapp + - Remove low priority queue, use default instead. + +- Role: ecommerce + - Remove unused JWT_SECRET_KEYS. + +- Role: ecommerce + - Transformed the JWT_ISSUERS to match the format expected by edx-drf-extensions jwt_decode_handler. 
+ +- Role: edxapp + - Added `ENTERPRISE_CUSTOMER_SUCCESS_EMAIL` to lms_env_config for configuring emails to the customer success team. + +- Role: edx_django_service + - Added new overridable variable `edx_django_service_gunicorn_max_requests` - Role: edxapp - `EDXAPP_EDXAPP_SECRET_KEY` no longer has a default value +- Role: ecommerce + - Set default max_requests to 3000.(eg. restart gunicorn process every 3000 requests.) + +- Role: edx_notes_api + - Added `JWT_AUTH` to edx-notes-api that is used in other IDAs. + +- Role: edxapp + - Removed `PASSWORD_MIN_LENGTH`, `PASSWORD_MAX_LENGTH`, and `PASSWORD_COMPLEXITY` in favor of specifying these in `AUTH_PASSWORD_VALIDATORS`. + +- Role: edxapp + - Added `AUTH_PASSWORD_VALIDATORS` to utilize Django's password validation. Base validators included in configuration are UserAttributeSimilarity to test the password against the username and email using the default similarity threshold of 0.7 (1.0 fails exact matches only), MinimumLength to test password minimum length, and MaximumLength to test password maximum length. + +- Role: edxapp + - Added `EDXAPP_LOGIN_REDIRECT_WHITELIST` which provides a whitelist of domains to which the login/logout pages will redirect. + +- Role: prospectus + - New role added to configure the prospectus service + +- Role: edxapp + - `EDXAPP_CACHE_BACKEND` added to allow overriding Django's memcache backend + +- Removed the obsolete install_stack.sh file (the last reference to fullstack) + +- Role: nginx + - Added `NGINX_EDXAPP_PROXY_INTERCEPT_ERRORS` to be able to use custom static error pages for error responses from the LMS. + - Added `NGINX_SERVER_HTML_FILES_TEMPLATE` to make the error file template configurable. + - Added `NGINX_SERVER_STATIC_FILES` to allow copying static contents to the server static folder. Can be used to deploy static contents for the error pages for example. 
+ +- Role: analytics_api + - Added `basic_auth_exempted_paths` configuration for enterprise api endpoints + +- Role: edx_django_service + - Added optional `edx_django_service_allow_cors_headers` boolean option to pass CORS headers (`Access-Control-Allow-Origin` and `Access-Control-Allow-Methods`) on non basic-auth + calls to support `/api` endpoints for analytics_api. + +- Role: analytics_api + - Added `ANALYTICS_API_CORS_ORIGIN_WHITELIST` to allow CORS whitelisting of origins. + +- Role: edxapp + - `EDXAPP_X_FRAME_OPTIONS` added in studio to prevent clickjacking. + +- Role: nginx + - Added `NGINX_EDXAPP_DEFAULT_SITE_THEME` to allow to completely + override `favicon.ico` file when Comprehensive Theme is enabled. + +- Role: edxapp + - Added `EDXAPP_X_FRAME_OPTIONS` to prevent click jacking in LMS. + - git_clone: - The working tree is explicitly checked for modified files, to prevent mysterious failures. - Installation - OPENEDX_RELEASE is now required, to prevent accidental installation of master. + - sandbox.sh has been renamed native.sh to better indicate what it does. - XQueue - Expose CLOUDWATCH_QUEUE_COUNT_METRIC which is defined XQueue's settings.py for further dictionary structure @@ -99,7 +429,7 @@ - Role: edxapp - Added `EDXAPP_RETIRED_USERNAME_FMT`, `EDXAPP_RETIRED_EMAIL_FMT`, `EDXAPP_RETIRED_USER_SALTS`, and - `EDXAPP_RETIREMENT_SERVICE_WORKER_USERNAME` to generic_env_config to allow user retirement to be configurable. + `EDXAPP_RETIREMENT_SERVICE_USER_NAME` to generic_env_config to allow user retirement to be configurable. - Role: edxapp - Added `ENTERPRISE_REPORTING_SECRET` to CMS auth settings to allow edx-enterprise migrations to run. @@ -623,3 +953,6 @@ - Added `WHITELABEL_ADMIN_USER` to specify an admin user. - Added `WHITELABEL_DNS` for DNS settings of themes. - Added `WHITELABEL_ORG` for whitelabel organization settings. + +- Role: all + - Removed the unused task timing callback plugin. 
diff --git a/Makefile b/Makefile index 0504d565f60..8c60d936305 100755 --- a/Makefile +++ b/Makefile @@ -20,6 +20,7 @@ requirements: pip install -qr pre-requirements.txt --exists-action w pip install -qr requirements.txt --exists-action w +upgrade: export CUSTOM_COMPILE_COMMAND=make upgrade upgrade: ## update the pip requirements files to use the latest releases satisfying our constraints pip install -qr pre-requirements.txt --exists-action w pip install -qr requirements/pip-tools.txt @@ -27,9 +28,9 @@ upgrade: ## update the pip requirements files to use the latest releases satisfy pip-compile --upgrade -o requirements/pip-tools.txt requirements/pip-tools.in pip-compile --upgrade -o requirements.txt requirements/base.in pip-compile --upgrade -o playbooks/roles/aws/templates/requirements.txt.j2 requirements/aws.in - pip-compile --upgrade -o requirements3.txt requirements/ses-limits.in pip-compile --upgrade -o util/elasticsearch/requirements.txt requirements/elasticsearch.in - pip-compile --upgrade -o util/jenkins/requirements-celery.txt requirements/celery.in + pip-compile --upgrade -o util/jenkins/update_celery_monitoring/requirements.txt requirements/celery.in + pip-compile --upgrade -o util/jenkins/check_celery_progress/requirements.txt requirements/celery_progress.in pip-compile --upgrade -o util/jenkins/requirements-cloudflare.txt requirements/cloudflare.in pip-compile --upgrade -o util/pingdom/requirements.txt requirements/pingdom.in pip-compile --upgrade -o util/vpc-tools/requirements.txt requirements/vpc-tools.in @@ -38,9 +39,9 @@ upgrade: ## update the pip requirements files to use the latest releases satisfy requirements/pip-tools.txt \ requirements.txt \ playbooks/roles/aws/templates/requirements.txt.j2 \ - requirements3.txt \ util/elasticsearch/requirements.txt \ - util/jenkins/requirements-celery.txt \ + util/jenkins/update_celery_monitoring/requirements.txt \ + util/jenkins/check_celery_progress/requirements.txt \ 
util/jenkins/requirements-cloudflare.txt \ util/pingdom/requirements.txt \ util/vpc-tools/requirements.txt diff --git a/README.rst b/README.rst index 48618d20dbb..7f7c6a09c2c 100644 --- a/README.rst +++ b/README.rst @@ -1,39 +1,11 @@ Configuration Management ######################## -Introduction -************ - -The goal of the edx/configuration project is to provide a simple, but flexible, -way for anyone to stand up an instance of Open edX that is fully configured and -ready-to-go. - -Before getting started, please look at the `Open EdX Installation options`_, to -see which method for deploying OpenEdX is right for you. - -Building the platform takes place in two phases: - -- Infrastructure provisioning -- Service configuration - -As much as possible, we have tried to keep a clean distinction between -provisioning and configuration. You are not obliged to use our tools and are -free to use one, but not the other. The provisioning phase stands-up the -required resources and tags them with role identifiers so that the -configuration tool can come in and complete the job. - -**Note**: The Cloudformation templates used for infrastructure provisioning are -no longer maintained. We are working to move to a more modern and flexible -tool. - -The reference platform is provisioned using an Amazon `CloudFormation`_ -template. When the stack has been fully created you will have a new AWS Virtual -Private Cloud with hosts for the core Open edX services. This template will -build quite a number of AWS resources that cost money, so please consider this -before you start. - -The configuration phase is managed by `Ansible`_. We have provided a number of -playbooks that will configure each of the Open edX services. +This repository is a collection of tools and scripts that edx.org uses to deploy +openedx. The purpose of this repository is to share portions of our toolchain +with the community. This repository is *not* the best way to get started running +openedx. 
For that, please look at `Open EdX Installation options`_, which contains +links to the recommended paths for new installations. **Important**: The Open edX configuration scripts need to be run as root on your servers and will make changes to service configurations including, but not @@ -44,13 +16,54 @@ them against your servers. We also recommend against running them against servers that are hosting other applications. No warranty is expressed or implied. -For more information including installation instruction please see the `OpenEdX +For more information including installation instructions please see the `OpenEdX Wiki`_. For info on any large recent changes please see the `change log`_. +What is in this Repo? +********************* + +* `playbooks `__: This directory contains ansible playbooks that can + be used to configure individual services in the openedx platform. See + `Open EdX Installation options`_ before trying to use any of the scripts in + this directory. +* `docker `__: This directory contains dockerfiles that can be used to + test that playbooks execute cleanly. See `Makefiles `__ for + Documentation on how to run these containers. +* `requirements `__ : inputs for `pip-compile `__ + Update files in this directory and then run ``make upgrade`` to update + ``requirements.txt`` +* `tests `__: scripts used by travis-ci to test changes to this repo +* `util `__: one-off scripts or tools used to perform certain functions + related to openedx management. +* `vagrant `__: vagrant tooling for testing changes to this repo. + + +Roadmap +******* + +This repository is in ``sustained`` status. The goal is to deprecate this codebase +and move the deployment code into the repos with the application code. + +With the adoption of containerized application platforms like `Kubernetes +`__, the tools in this repository are complex +and inappropriate for building small single purpose containers. 
+ +At edx.org, we are focusing on deployment of applications using `Terraform +`__ and `Kubernetes `__. We +hope to provide open source tooling for this soon. + + +Contributing +************ + +* Bugfixes: If you would like to contribute a bugfix to this codebase, please open + a pull request. A bot will automatically walk your contribution through the + `Open Source Contribution process `__. + + .. _Open EdX Installation options: https://open.edx.org/installation-options -.. _CloudFormation: http://aws.amazon.com/cloudformation/ .. _Ansible: http://ansible.com/ .. _OpenEdX Wiki: https://openedx.atlassian.net/wiki/display/OpenOPS/Open+edX+Operations+Home .. _change log: https://github.com/edx/configuration/blob/master/CHANGELOG.md diff --git a/docker.mk b/docker.mk index 02074590c9b..c77b73c9bc7 100644 --- a/docker.mk +++ b/docker.mk @@ -1,4 +1,4 @@ -.PHONY: docker.build docker.test docker.pkg +.PHONY: docker.build docker.pkg SHARD=0 SHARDS=1 @@ -8,9 +8,10 @@ all_images:=$(patsubst docker/build/%/Dockerfile,%,$(dockerfiles)) # Used in the test.mk file as well. images:=$(if $(TRAVIS_COMMIT_RANGE),$(shell git diff --name-only $(TRAVIS_COMMIT_RANGE) | python util/parsefiles.py),$(all_images)) +# Only use images that actually contain a Dockerfile +images:=$(shell echo "$(all_images) $(images)" | tr " " "\n" | sort | uniq -d) docker_build=docker.build. -docker_test=docker.test. docker_pkg=docker.pkg. docker_push=docker.push. 
@@ -24,12 +25,10 @@ docker.help: @echo ' $(docker_pull)$$image pull $$image from dockerhub' @echo '' @echo ' $(docker_build)$$container build $$container' - @echo ' $(docker_test)$$container test that $$container will build' @echo ' $(docker_pkg)$$container package $$container for a push to dockerhub' @echo ' $(docker_push)$$container push $$container to dockerhub ' @echo '' @echo ' docker.build build all defined docker containers (based on dockerhub base images)' - @echo ' docker.test test all defined docker containers' @echo ' docker.pkg package all defined docker containers (using local base images)' @echo ' docker.push push all defined docker containers' @echo '' @@ -42,8 +41,6 @@ docker_pull=docker.pull/ build: docker.build -test: docker.test - pkg: docker.pkg clean: docker.clean @@ -51,10 +48,7 @@ clean: docker.clean docker.clean: rm -rf .build -docker.test.shard: $(foreach image,$(shell echo $(images) | python util/balancecontainers.py $(SHARDS) | awk 'NR%$(SHARDS)==$(SHARD)'),$(docker_test)$(image)) - docker.build: $(foreach image,$(images),$(docker_build)$(image)) -docker.test: $(foreach image,$(images),$(docker_test)$(image)) docker.pkg: $(foreach image,$(images),$(docker_pkg)$(image)) docker.push: $(foreach image,$(images),$(docker_push)$(image)) @@ -64,9 +58,6 @@ $(docker_pull)%: $(docker_build)%: docker/build/%/Dockerfile docker build -f $< . -$(docker_test)%: .build/%/Dockerfile.test - docker build -t $*:test -f $< . - $(docker_pkg)%: .build/%/Dockerfile.pkg docker build -t $*:latest -f $< . @@ -77,25 +68,22 @@ $(docker_push)%: $(docker_pkg)% .build/%/Dockerfile.d: docker/build/%/Dockerfile Makefile @mkdir -p .build/$* - $(eval FROM=$(shell grep "^\s*FROM" $< | sed -E "s/FROM //" | sed -E "s/:/@/g")) + $(eval BASE_IMAGE_TAG=$(shell grep "^\s*ARG BASE_IMAGE_TAG" $< | sed -E "s/ARG BASE_IMAGE_TAG=//")) + @# I have no idea why the final sed is eating the first character of the substitution... 
+ $(eval FROM=$(shell grep "^\s*FROM" docker/build/ecommerce/Dockerfile | sed -E "s/FROM //" | sed -E "s/:/@/g" | sed -E 's/\$\{BASE_IMAGE_TAG\}/ $(BASE_IMAGE_TAG)/')) $(eval EDXOPS_FROM=$(shell echo "$(FROM)" | sed -E "s#edxops/([^@]+)(@.*)?#\1#")) + @echo "Base Image Tag: $(BASE_IMAGE_TAG)" + @echo $(FROM) + @echo $(EDXOPS_FROM) @echo "$(docker_build)$*: $(docker_pull)$(FROM)" > $@ @if [ "$(EDXOPS_FROM)" != "$(FROM)" ]; then \ - echo "$(docker_test)$*: $(docker_test)$(EDXOPS_FROM:@%=)" >> $@; \ echo "$(docker_pkg)$*: $(docker_pkg)$(EDXOPS_FROM:@%=)" >> $@; \ else \ - echo "$(docker_test)$*: $(docker_pull)$(FROM)" >> $@; \ echo "$(docker_pkg)$*: $(docker_pull)$(FROM)" >> $@; \ fi -.build/%/Dockerfile.test: docker/build/%/Dockerfile Makefile - @mkdir -p .build/$* - @# perl p (print the line) n (loop over every line) e (exec the regex), like sed but cross platform - @perl -pne "s#FROM edxops/([^:]+)(:\S*)?#FROM \1:test#" $< > $@ - .build/%/Dockerfile.pkg: docker/build/%/Dockerfile Makefile @mkdir -p .build/$* @# perl p (print the line) n (loop over every line) e (exec the regex), like sed but cross platform - @perl -pne "s#FROM edxops/([^:]+)(:\S*)?#FROM \1:test#" $< > $@ -include $(foreach image,$(images),.build/$(image)/Dockerfile.d) diff --git a/docker/build/analytics_api/Dockerfile b/docker/build/analytics_api/Dockerfile index 6ad328b3b4a..5b7342d201e 100644 --- a/docker/build/analytics_api/Dockerfile +++ b/docker/build/analytics_api/Dockerfile @@ -9,7 +9,7 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" RUN apt-get update @@ -18,15 +18,17 @@ COPY docker/build/analytics_api/ansible_overrides.yml / WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays COPY docker/build/analytics_api/ansible_overrides.yml / +COPY docker/build/analytics_api/analytics_api.yml /edx/etc/analytics_api.yml ARG OPENEDX_RELEASE=master ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} RUN 
/edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook analytics_api.yml \ -i '127.0.0.1,' \ -c local \ - -t "install:base,install:system-requirements,install:configuration,install:app-requirements,install:code" \ + -t "install:base,install:system-requirements,install:configuration,install:app-requirements,install:code,devstack:install" \ --extra-vars="ANALYTICS_API_VERSION=${OPENEDX_RELEASE}" \ --extra-vars="@/ansible_overrides.yml" WORKDIR /edx/app/ -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +ENTRYPOINT ["/edx/app/analytics_api/devstack.sh"] +CMD ["start"] EXPOSE 443 80 diff --git a/docker/build/analytics_api/analytics_api.yml b/docker/build/analytics_api/analytics_api.yml new file mode 100644 index 00000000000..612c0591911 --- /dev/null +++ b/docker/build/analytics_api/analytics_api.yml @@ -0,0 +1,77 @@ +--- + +AGGREGATE_PAGE_SIZE: 10 +ANALYTICS_DATABASE: reports +API_AUTH_TOKEN: put-your-api-token-here +API_ROOT: null +BACKEND_SERVICE_EDX_OAUTH2_KEY: analytics_api-backend-service-key +BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://127.0.0.1:8000/oauth2 +BACKEND_SERVICE_EDX_OAUTH2_SECRET: analytics_api-backend-service-secret +CACHES: + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_PREFIX: analytics_api + LOCATION: + - memcache +CSRF_COOKIE_SECURE: false +DATABASES: + default: + ENGINE: django.db.backends.mysql + HOST: db.edx + NAME: analytics-api + PASSWORD: password + PORT: '3306' + USER: api001 + reports: + ENGINE: django.db.backends.mysql + HOST: db.edx + NAME: reports + PASSWORD: password + PORT: '3306' + USER: reports001 +DATETIME_FORMAT: '%Y-%m-%dT%H%M%S' +DATE_FORMAT: '%Y-%m-%d' +DEFAULT_PAGE_SIZE: 25 +EDX_DRF_EXTENSIONS: + OAUTH2_USER_INFO_URL: http://127.0.0.1:8000/oauth2/user_info +ELASTICSEARCH_AWS_ACCESS_KEY_ID: null +ELASTICSEARCH_AWS_SECRET_ACCESS_KEY: null +ELASTICSEARCH_CONNECTION_CLASS: null 
+ELASTICSEARCH_CONNECTION_DEFAULT_REGION: us-east-1 +ELASTICSEARCH_LEARNERS_HOST: edx.devstack.elasticsearch +ELASTICSEARCH_LEARNERS_INDEX: roster_1_2 +ELASTICSEARCH_LEARNERS_UPDATE_INDEX: index_updates +EXTRA_APPS: [] +JWT_AUTH: + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUERS: + - AUDIENCE: SET-ME-PLEASE + ISSUER: http://127.0.0.1:8000/oauth2 + SECRET_KEY: SET-ME-PLEASE + JWT_PUBLIC_SIGNING_JWK_SET: '' +LANGUAGE_CODE: en-us +LMS_BASE_URL: http://127.0.0.1:8000/ +MAX_PAGE_SIZE: 100 +MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage + MEDIA_ROOT: /edx/var/analytics_api/media + MEDIA_URL: /media/ +REPORT_DOWNLOAD_BACKEND: + COURSE_REPORT_FILE_LOCATION_TEMPLATE: '{course_id}_{report_name}.csv' + DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage + MEDIA_ROOT: /edx/var/analytics_api/static/reports + MEDIA_URL: http://localhost:8100/static/reports/ +SECRET_KEY: Your secret key here +SESSION_EXPIRE_AT_BROWSER_CLOSE: false +SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 +SOCIAL_AUTH_EDX_OAUTH2_KEY: analytics_api-sso-key +SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://127.0.0.1:8000/logout +SOCIAL_AUTH_EDX_OAUTH2_SECRET: analytics_api-sso-secret +SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 +SOCIAL_AUTH_REDIRECT_IS_HTTPS: false +STATICFILES_DIRS: +- static +STATICFILES_STORAGE: django.contrib.staticfiles.storage.StaticFilesStorage +STATIC_ROOT: /edx/var/analytics_api/staticfiles +TIME_ZONE: UTC diff --git a/docker/build/analytics_api/ansible_overrides.yml b/docker/build/analytics_api/ansible_overrides.yml index 299cd543370..d5b6dc54258 100644 --- a/docker/build/analytics_api/ansible_overrides.yml +++ b/docker/build/analytics_api/ansible_overrides.yml @@ -1,5 +1,9 @@ --- +edx_django_service_is_devstack: True + +ANALYTICS_API_DJANGO_SETTINGS_MODULE: "analyticsdataserver.settings.devstack" + DOCKER_TLD: "edx" 
ANALYTICS_API_DATABASES: diff --git a/docker/build/analytics_pipeline/Dockerfile b/docker/build/analytics_pipeline/Dockerfile index 55eb7ac7066..539ec5a752d 100644 --- a/docker/build/analytics_pipeline/Dockerfile +++ b/docker/build/analytics_pipeline/Dockerfile @@ -1,6 +1,6 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" USER root ENV BOTO_CONFIG=/dev/null \ @@ -20,6 +20,8 @@ ENV BOTO_CONFIG=/dev/null \ SQOOP_MYSQL_CONNECTOR_FILE=mysql-connector-java-5.1.29 \ SQOOP_HOME=/edx/app/hadoop/sqoop \ SQOOP_LIB=/edx/app/hadoop/sqoop/lib \ + SQOOP_VERTICA_CONNECTOR_URL=https://vertica.com/client_drivers/9.1.x/9.1.1-0/vertica-jdbc-9.1.1-0.jar \ + SQOOP_VERTICA_CONNECTOR_FILE=vertica-jdbc-9.1.1-0.jar \ SPARK_URL=https://archive.apache.org/dist/spark/spark-2.1.0/spark-2.1.0-bin-hadoop2.7.tgz \ SPARK_DIST_FILE=spark-2.1.0-bin-hadoop2.7.tgz \ SPARK_HOME=/edx/app/hadoop/spark \ @@ -27,10 +29,9 @@ ENV BOTO_CONFIG=/dev/null \ ANALYTICS_PIPELINE_VENV=/edx/app/analytics_pipeline/venvs \ BOOTSTRAP=/etc/bootstrap.sh \ COMMON_BASE_DIR=/edx \ - COMMON_PIP_PACKAGES_PIP='pip==9.0.3' \ - COMMON_PIP_PACKAGES_SETUPTOOLS='setuptools==39.0.1' \ - COMMON_PIP_PACKAGES_VIRTUALENV='virtualenv==15.2.0' \ - COMMON_PIP_PACKAGES_VIRTUALENVWRAPPER='virtualenvwrapper==4.8.2' \ + COMMON_PIP_PACKAGES_PIP='pip==20.0.2' \ + COMMON_PIP_PACKAGES_SETUPTOOLS='setuptools==44.1.0' \ + COMMON_PIP_PACKAGES_VIRTUALENV='virtualenv==16.7.10' \ COMMON_MYSQL_READ_ONLY_USER='read_only' \ COMMON_MYSQL_READ_ONLY_PASS='password' \ ANALYTICS_PIPELINE_OUTPUT_DATABASE_USER='pipeline001' \ @@ -98,11 +99,13 @@ RUN curl -fSL "$SPARK_URL" -o /var/tmp/$SPARK_DIST_FILE \ # SQOOP RUN curl -fSL "$SQOOP_URL" -o /var/tmp/$SQOOP_DIST_FILE \ && curl -fSL "$SQOOP_MYSQL_CONNECTOR_URL" -o /var/tmp/$SQOOP_MYSQL_CONNECTOR_FILE.tar.gz \ + && curl -fSL "$SQOOP_VERTICA_CONNECTOR_URL" -o /var/tmp/$SQOOP_VERTICA_CONNECTOR_FILE \ && tar -xzf /var/tmp/$SQOOP_DIST_FILE -C 
$SQOOP_HOME --strip-components=1 \ && tar -xzf /var/tmp/$SQOOP_MYSQL_CONNECTOR_FILE.tar.gz -C /var/tmp/ \ && cp /var/tmp/$SQOOP_MYSQL_CONNECTOR_FILE/$SQOOP_MYSQL_CONNECTOR_FILE-bin.jar $SQOOP_LIB \ && cp /var/tmp/$SQOOP_MYSQL_CONNECTOR_FILE/$SQOOP_MYSQL_CONNECTOR_FILE-bin.jar $HIVE_HOME/lib/ \ - && rm -rf /var/tmp/$SQOOP_DIST_FILE /var/tmp/$SQOOP_MYSQL_CONNECTOR_FILE* + && cp /var/tmp/$SQOOP_VERTICA_CONNECTOR_FILE $SQOOP_LIB \ + && rm -rf /var/tmp/$SQOOP_DIST_FILE /var/tmp/$SQOOP_MYSQL_CONNECTOR_FILE* /var/tmp/$SQOOP_VERTICA_CONNECTOR_FILE* WORKDIR /var/tmp # Edx Hadoop Util Library @@ -128,7 +131,7 @@ RUN git clone https://github.com/edx/edx-analytics-pipeline \ && cp -r /var/tmp/edx-analytics-pipeline/requirements /var/tmp/requirements \ && rm -rf /var/tmp/edx-analytics-pipeline -RUN pip install $COMMON_PIP_PACKAGES_PIP $COMMON_PIP_PACKAGES_SETUPTOOLS $COMMON_PIP_PACKAGES_VIRTUALENV $COMMON_PIP_PACKAGES_VIRTUALENVWRAPPER \ +RUN pip install $COMMON_PIP_PACKAGES_PIP $COMMON_PIP_PACKAGES_SETUPTOOLS $COMMON_PIP_PACKAGES_VIRTUALENV \ && virtualenv $ANALYTICS_PIPELINE_VENV/analytics_pipeline/ \ && chown -R hadoop:hadoop $ANALYTICS_PIPELINE_VENV/analytics_pipeline/ \ && echo '[hadoop]\nversion: cdh4\ncommand: /edx/app/hadoop/hadoop/bin/hadoop\nstreaming-jar: /edx/app/hadoop/hadoop/share/hadoop/tools/lib/hadoop-streaming-2.7.2.jar' > /etc/luigi/client.cfg diff --git a/docker/build/analytics_pipeline/devstack.sh b/docker/build/analytics_pipeline/devstack.sh index a13e4a6119d..348eb1c47e6 100644 --- a/docker/build/analytics_pipeline/devstack.sh +++ b/docker/build/analytics_pipeline/devstack.sh @@ -3,10 +3,24 @@ COMMAND=$1 case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; open) . /edx/app/analytics_pipeline/venvs/analytics_pipeline/bin/activate cd /edx/app/analytics_pipeline/analytics_pipeline /bin/bash ;; + exec) + shift + + . 
/edx/app/analytics_pipeline/venvs/analytics_pipeline/bin/activate + cd /edx/app/analytics_pipeline/analytics_pipeline + + "$@" + ;; + *) + "$@" + ;; esac diff --git a/docker/build/analytics_pipeline_hadoop_datanode/Dockerfile b/docker/build/analytics_pipeline_hadoop_datanode/Dockerfile index 853d1f16d6e..b14f85bcc8a 100644 --- a/docker/build/analytics_pipeline_hadoop_datanode/Dockerfile +++ b/docker/build/analytics_pipeline_hadoop_datanode/Dockerfile @@ -1,15 +1,20 @@ FROM uhopper/hadoop:2.7.2 -MAINTAINER edxops +LABEL maintainer="edxops" + +RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/backports.list + +RUN \ +echo "deb http://mirrors.linode.com/debian/ stretch main" > /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list ENV HDFS_CONF_dfs_datanode_data_dir=file:///hadoop/dfs/data \ MYSQL_VERSION=5.6 \ DEBIAN_FRONTEND=noninteractive WORKDIR /tmp -RUN \ - echo "deb http://ftp.de.debian.org/debian/ stretch main non-free contrib\n" > /etc/apt/sources.list && \ - echo "deb-src http://ftp.de.debian.org/debian/ stretch main non-free contrib\n" >> /etc/apt/sources.list && \ - echo "deb http://security.debian.org/ stretch/updates main contrib non-free\n" >> /etc/apt/sources.list && \ - echo "deb-src http://security.debian.org/ stretch/updates main contrib non-free" >> /etc/apt/sources.list RUN apt-get -y update RUN apt-get -yqq install apt-transport-https lsb-release ca-certificates gnupg2 diff --git a/docker/build/analytics_pipeline_hadoop_namenode/Dockerfile 
b/docker/build/analytics_pipeline_hadoop_namenode/Dockerfile index 1f372d9444c..80143432384 100644 --- a/docker/build/analytics_pipeline_hadoop_namenode/Dockerfile +++ b/docker/build/analytics_pipeline_hadoop_namenode/Dockerfile @@ -1,16 +1,20 @@ FROM uhopper/hadoop:2.7.2 -MAINTAINER edxops +LABEL maintainer="edxops" + +RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/backports.list + +RUN \ +echo "deb http://mirrors.linode.com/debian/ stretch main" > /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list ENV HDFS_CONF_dfs_namenode_name_dir=file:///hadoop/dfs/name \ MYSQL_VERSION=5.6 \ DEBIAN_FRONTEND=noninteractive WORKDIR /tmp -RUN \ - echo "deb http://ftp.de.debian.org/debian/ stretch main non-free contrib\n" > /etc/apt/sources.list && \ - echo "deb-src http://ftp.de.debian.org/debian/ stretch main non-free contrib\n" >> /etc/apt/sources.list && \ - echo "deb http://security.debian.org/ stretch/updates main contrib non-free\n" >> /etc/apt/sources.list && \ - echo "deb-src http://security.debian.org/ stretch/updates main contrib non-free" >> /etc/apt/sources.list - RUN apt-get -y update RUN apt-get -yqq install apt-transport-https lsb-release ca-certificates gnupg2 RUN ( apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys A4A9406876FCBD3C456770C88C718D3B5072E1F5 \ diff --git a/docker/build/analytics_pipeline_hadoop_nodemanager/Dockerfile b/docker/build/analytics_pipeline_hadoop_nodemanager/Dockerfile index 67708edfe48..00130c896f4 100644 --- 
a/docker/build/analytics_pipeline_hadoop_nodemanager/Dockerfile +++ b/docker/build/analytics_pipeline_hadoop_nodemanager/Dockerfile @@ -1,13 +1,18 @@ FROM uhopper/hadoop:2.7.2 -MAINTAINER edxops +LABEL maintainer="edxops" + +RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/backports.list + +RUN \ +echo "deb http://mirrors.linode.com/debian/ stretch main" > /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list ENV MYSQL_VERSION=5.6 DEBIAN_FRONTEND=noninteractive WORKDIR /tmp -RUN \ - echo "deb http://ftp.de.debian.org/debian/ stretch main non-free contrib\n" > /etc/apt/sources.list && \ - echo "deb-src http://ftp.de.debian.org/debian/ stretch main non-free contrib\n" >> /etc/apt/sources.list && \ - echo "deb http://security.debian.org/ stretch/updates main contrib non-free\n" >> /etc/apt/sources.list && \ - echo "deb-src http://security.debian.org/ stretch/updates main contrib non-free" >> /etc/apt/sources.list RUN apt-get -y update RUN apt-get -yqq install apt-transport-https lsb-release ca-certificates gnupg2 diff --git a/docker/build/analytics_pipeline_hadoop_resourcemanager/Dockerfile b/docker/build/analytics_pipeline_hadoop_resourcemanager/Dockerfile index f49a91e4400..9e91460e583 100644 --- a/docker/build/analytics_pipeline_hadoop_resourcemanager/Dockerfile +++ b/docker/build/analytics_pipeline_hadoop_resourcemanager/Dockerfile @@ -1,13 +1,18 @@ FROM uhopper/hadoop:2.7.2 -MAINTAINER edxops +LABEL maintainer="edxops" + +RUN echo "deb 
http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/backports.list + +RUN \ +echo "deb http://mirrors.linode.com/debian/ stretch main" > /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian-security/ stretch/updates main" >> /etc/apt/sources.list && \ +echo "deb http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list && \ +echo "deb-src http://mirrors.linode.com/debian/ stretch-updates main" >> /etc/apt/sources.list ENV MYSQL_VERSION=5.6 DEBIAN_FRONTEND=noninteractive WORKDIR /tmp -RUN \ - echo "deb http://ftp.de.debian.org/debian/ stretch main non-free contrib\n" > /etc/apt/sources.list && \ - echo "deb-src http://ftp.de.debian.org/debian/ stretch main non-free contrib\n" >> /etc/apt/sources.list && \ - echo "deb http://security.debian.org/ stretch/updates main contrib non-free\n" >> /etc/apt/sources.list && \ - echo "deb-src http://security.debian.org/ stretch/updates main contrib non-free" >> /etc/apt/sources.list RUN apt-get -y update RUN apt-get -yqq install apt-transport-https lsb-release ca-certificates gnupg2 diff --git a/docker/build/analytics_pipeline_spark_master/Dockerfile b/docker/build/analytics_pipeline_spark_master/Dockerfile index d8c624b8eaa..4271462ede0 100644 --- a/docker/build/analytics_pipeline_spark_master/Dockerfile +++ b/docker/build/analytics_pipeline_spark_master/Dockerfile @@ -1,5 +1,8 @@ FROM bde2020/spark-base:2.1.0-hadoop2.7 -MAINTAINER edxops +LABEL maintainer="edxops" + +RUN echo "deb [check-valid-until=no] http://archive.debian.org/debian jessie-backports main" > /etc/apt/sources.list.d/jessie-backports.list +RUN sed -i '/deb http:\/\/deb.debian.org\/debian jessie-updates main/d' /etc/apt/sources.list ADD docker/build/analytics_pipeline_spark_master/master.sh / ENV 
SPARK_MASTER_PORT=7077 \ @@ -29,7 +32,7 @@ ENV SPARK_MASTER_PORT=7077 \ YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030 \ YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031 -RUN apt-get -y update && apt-get -y install --reinstall python-pkg-resources \ +RUN apt-get -o Acquire::Check-Valid-Until=false update && apt-get -y install --reinstall python-pkg-resources \ && echo 'spark.master spark://sparkmaster:7077\nspark.eventLog.enabled true\nspark.eventLog.dir hdfs://namenode:8020/tmp/spark-events\nspark.history.fs.logDirectory hdfs://namenode:8020/tmp/spark-events' > /spark/conf/spark-defaults.conf CMD ["/bin/bash", "/master.sh"] diff --git a/docker/build/analytics_pipeline_spark_worker/Dockerfile b/docker/build/analytics_pipeline_spark_worker/Dockerfile index 0c0db2dcc4c..5eb4b1ae85a 100644 --- a/docker/build/analytics_pipeline_spark_worker/Dockerfile +++ b/docker/build/analytics_pipeline_spark_worker/Dockerfile @@ -1,5 +1,8 @@ FROM bde2020/spark-base:2.1.0-hadoop2.7 -MAINTAINER edxops +LABEL maintainer="edxops" + +RUN echo "deb [check-valid-until=no] http://archive.debian.org/debian jessie-backports main" > /etc/apt/sources.list.d/jessie-backports.list +RUN sed -i '/deb http:\/\/deb.debian.org\/debian jessie-updates main/d' /etc/apt/sources.list ADD docker/build/analytics_pipeline_spark_worker/worker.sh / ENV SPARK_WORKER_WEBUI_PORT=8081 \ @@ -27,6 +30,9 @@ ENV SPARK_WORKER_WEBUI_PORT=8081 \ YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030 \ YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031 -RUN apt-get -y update && apt-get -y install --reinstall python-pkg-resources +RUN ( apt-key adv --keyserver keyserver.ubuntu.com --recv-key 04EE7237B7D453EC \ + || apt-key adv --keyserver keyserver.ubuntu.com --recv-key 648ACFD622F3D138) + +RUN apt-get -o Acquire::Check-Valid-Until=false update && apt-get -y install --reinstall python-pkg-resources CMD ["/bin/bash", "/worker.sh"] 
EXPOSE 8081 diff --git a/docker/build/automated/Dockerfile b/docker/build/automated/Dockerfile index 20e06632511..cf164d9fd87 100644 --- a/docker/build/automated/Dockerfile +++ b/docker/build/automated/Dockerfile @@ -1,5 +1,5 @@ FROM edxops/xenial-common:latest -MAINTAINER edxops +LABEL maintainer="edxops" ADD . /edx/app/edx_ansible/edx_ansible COPY docker/build/automated/ansible_overrides.yml / diff --git a/docker/build/bionic-common/Dockerfile b/docker/build/bionic-common/Dockerfile new file mode 100644 index 00000000000..c99809f1ebd --- /dev/null +++ b/docker/build/bionic-common/Dockerfile @@ -0,0 +1,22 @@ +FROM ubuntu:bionic + +# Set locale to UTF-8 which is not the default for docker. +# See the links for details: +# http://jaredmarkell.com/docker-and-locales/ +# https://github.com/docker-library/python/issues/13 +# https://github.com/docker-library/python/pull/14/files +RUN apt-get update &&\ + apt-get install -y locales &&\ + locale-gen en_US.UTF-8 +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 + +ENV ANSIBLE_REPO="https://github.com/edx/ansible" +ENV CONFIGURATION_REPO="https://github.com/edx/configuration.git" +ARG OPENEDX_RELEASE=master +ENV CONFIGURATION_VERSION="${OPENEDX_RELEASE}" + +ADD util/install/ansible-bootstrap.sh /tmp/ansible-bootstrap.sh +RUN chmod +x /tmp/ansible-bootstrap.sh +RUN /tmp/ansible-bootstrap.sh diff --git a/docker/build/chrome/Dockerfile b/docker/build/chrome/Dockerfile index 2cf9e267cc0..d4811e942ba 100644 --- a/docker/build/chrome/Dockerfile +++ b/docker/build/chrome/Dockerfile @@ -1,5 +1,5 @@ -FROM selenium/standalone-chrome-debug:3.4.0-einsteinium -MAINTAINER edxops +FROM selenium/standalone-chrome-debug:3.14.0-arsenic +LABEL maintainer="edxops" USER root diff --git a/docker/build/credentials/Dockerfile b/docker/build/credentials/Dockerfile index adb5a6b01f4..80bbf6c9d59 100644 --- a/docker/build/credentials/Dockerfile +++ b/docker/build/credentials/Dockerfile @@ -9,15 +9,17 @@ ARG BASE_IMAGE_TAG=latest 
FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" USER root -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +ENTRYPOINT ["/edx/app/credentials/devstack.sh"] +CMD ["start"] ADD . /edx/app/edx_ansible/edx_ansible WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays COPY docker/build/credentials/ansible_overrides.yml / -COPY docker/build/devstack/ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/build/credentials/credentials.yml /edx/etc/credentials.yml ARG OPENEDX_RELEASE=master ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} diff --git a/docker/build/credentials/credentials.yml b/docker/build/credentials/credentials.yml new file mode 100644 index 00000000000..4cd04b6c997 --- /dev/null +++ b/docker/build/credentials/credentials.yml @@ -0,0 +1,61 @@ +--- + +API_ROOT: null +BACKEND_SERVICE_EDX_OAUTH2_KEY: credentials-backend-service-key +BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://localhost:18000/oauth2 +BACKEND_SERVICE_EDX_OAUTH2_SECRET: credentials-backend-service-secret +CACHES: + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_PREFIX: credentials + LOCATION: + - edx.devstack.memcached:11211 +CERTIFICATE_LANGUAGES: + en: English + es_419: Spanish +CREDENTIALS_SERVICE_USER: credentials_service_user +CSRF_COOKIE_SECURE: false +DATABASES: + default: + ATOMIC_REQUESTS: false + CONN_MAX_AGE: 60 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: credentials + OPTIONS: + connect_timeout: 10 + init_command: SET sql_mode='STRICT_TRANS_TABLES' + PASSWORD: password + PORT: '3306' + USER: credentials001 +EDX_DRF_EXTENSIONS: + OAUTH2_USER_INFO_URL: http://edx.devstack.lms:18000/oauth2/user_info +EXTRA_APPS: +- credentials.apps.edx_credentials_extensions +FILE_STORAGE_BACKEND: {} +JWT_AUTH: + 
JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + JWT_PUBLIC_SIGNING_JWK_SET: '' +LANGUAGE_CODE: en +LANGUAGE_COOKIE_NAME: openedx-language-preference +MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage + MEDIA_ROOT: /edx/var/credentials/media + MEDIA_URL: /media/ +SECRET_KEY: SET-ME-TO-A-UNIQUE-LONG-RANDOM-STRING +SESSION_EXPIRE_AT_BROWSER_CLOSE: false +SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 +SOCIAL_AUTH_EDX_OAUTH2_KEY: credentials-sso-key +SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout +SOCIAL_AUTH_EDX_OAUTH2_SECRET: credentials-sso-secret +SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 +SOCIAL_AUTH_REDIRECT_IS_HTTPS: false +STATICFILES_STORAGE: django.contrib.staticfiles.storage.ManifestStaticFilesStorage +STATIC_ROOT: /edx/var/credentials/staticfiles +TIME_ZONE: UTC +USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME diff --git a/docker/build/designer/Dockerfile b/docker/build/designer/Dockerfile new file mode 100644 index 00000000000..7c9c95a0abf --- /dev/null +++ b/docker/build/designer/Dockerfile @@ -0,0 +1,32 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/designer/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/xenial-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" +USER root +ENTRYPOINT ["/edx/app/designer/devstack.sh"] +CMD ["start"] + +ADD . 
/edx/app/edx_ansible/edx_ansible +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/designer/ansible_overrides.yml / +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook designer.yml \ + -c local -i "127.0.0.1," \ + -t "install,assets,devstack" \ + --extra-vars="DESIGNER_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" \ + --extra-vars="@/devstack/ansible_overrides.yml" + +EXPOSE 18808 diff --git a/docker/build/designer/ansible_overrides.yml b/docker/build/designer/ansible_overrides.yml new file mode 100644 index 00000000000..f1b30d9eab3 --- /dev/null +++ b/docker/build/designer/ansible_overrides.yml @@ -0,0 +1,14 @@ +--- +COMMON_GIT_PATH: 'edx' + +COMMON_MYSQL_MIGRATE_USER: '{{ DESIGNER_MYSQL_USER }}' +COMMON_MYSQL_MIGRATE_PASS: '{{ DESIGNER_MYSQL_PASSWORD }}' + +DESIGNER_MYSQL_HOST: 'edx.devstack.mysql' +DESIGNER_DJANGO_SETTINGS_MODULE: 'designer.settings.devstack' +DESIGNER_GUNICORN_EXTRA: '--reload' +DESIGNER_MEMCACHE: ['edx.devstack.memcached:11211'] +DESIGNER_EXTRA_APPS: [] +DESIGNER_URL_ROOT: 'http://designer:18808' + +edx_django_service_is_devstack: true diff --git a/docker/build/devpi/Dockerfile b/docker/build/devpi/Dockerfile index 0e91a0529fd..a460ea2b4ec 100644 --- a/docker/build/devpi/Dockerfile +++ b/docker/build/devpi/Dockerfile @@ -9,7 +9,7 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" ARG ARG_DEVPI_SERVER_VERSION=4.4.0 ARG ARG_DEVPI_WEB_VERSION=3.2.2 @@ -21,7 +21,7 @@ WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays RUN apt-get update -COPY docker/build/devstack/ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml RUN sudo 
/edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook devpi.yml \ -c local -i '127.0.0.1,' \ diff --git a/docker/build/discovery/Dockerfile b/docker/build/discovery/Dockerfile index ccf23657f60..f680bb8b6e7 100644 --- a/docker/build/discovery/Dockerfile +++ b/docker/build/discovery/Dockerfile @@ -9,15 +9,17 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" USER root -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +ENTRYPOINT ["/edx/app/discovery/devstack.sh"] +CMD ["start"] ADD . /edx/app/edx_ansible/edx_ansible WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays COPY docker/build/discovery/ansible_overrides.yml / -COPY docker/build/devstack/ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/build/discovery/discovery.yml /edx/etc/discovery.yml ARG OPENEDX_RELEASE=master ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} diff --git a/docker/build/discovery/ansible_overrides.yml b/docker/build/discovery/ansible_overrides.yml index 1bde5aad668..d78611fc76f 100644 --- a/docker/build/discovery/ansible_overrides.yml +++ b/docker/build/discovery/ansible_overrides.yml @@ -5,6 +5,7 @@ COMMON_MYSQL_MIGRATE_USER: '{{ DISCOVERY_MYSQL_USER }}' COMMON_MYSQL_MIGRATE_PASS: '{{ DISCOVERY_MYSQL_PASSWORD }}' DISCOVERY_MYSQL: 'edx.devstack.mysql' +DISCOVERY_MYSQL_REPLICA_HOST: 'edx.devstack.mysql' DISCOVERY_DJANGO_SETTINGS_MODULE: 'course_discovery.settings.devstack' DISCOVERY_ELASTICSEARCH_HOST: 'edx.devstack.elasticsearch' DISCOVERY_GUNICORN_EXTRA: '--reload' diff --git a/docker/build/discovery/discovery.yml b/docker/build/discovery/discovery.yml new file mode 100644 index 00000000000..900e5a3ba52 --- /dev/null +++ b/docker/build/discovery/discovery.yml @@ -0,0 +1,90 @@ +--- + +API_ROOT: null +AWS_SES_REGION_ENDPOINT: 
email.us-east-1.amazonaws.com +AWS_SES_REGION_NAME: us-east-1 +BACKEND_SERVICE_EDX_OAUTH2_KEY: discovery-backend-service-key +BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://localhost:18000/oauth2 +BACKEND_SERVICE_EDX_OAUTH2_SECRET: discovery-backend-service-secret +CACHES: + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_PREFIX: discovery + LOCATION: + - edx.devstack.memcached:11211 +CORS_ORIGIN_WHITELIST: [] +CSRF_COOKIE_SECURE: false +DATABASES: + default: + ATOMIC_REQUESTS: 'false' + CONN_MAX_AGE: 60 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: discovery + OPTIONS: + connect_timeout: 10 + init_command: SET sql_mode='STRICT_TRANS_TABLES' + PASSWORD: password + PORT: 3306 + USER: discov001 + read_replica: + ATOMIC_REQUESTS: 'false' + CONN_MAX_AGE: 60 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: discovery + OPTIONS: + connect_timeout: 10 + init_command: SET sql_mode='STRICT_TRANS_TABLES' + PASSWORD: password + PORT: 3306 + USER: discov001 +DEFAULT_PARTNER_ID: 1 +EDX_DRF_EXTENSIONS: + OAUTH2_USER_INFO_URL: http://edx.devstack.lms:18000/oauth2/user_info +ELASTICSEARCH_INDEX_NAME: catalog +ELASTICSEARCH_URL: http://127.0.0.1:9200/ +EMAIL_BACKEND: django_ses.SESBackend +EMAIL_HOST: localhost +EMAIL_HOST_PASSWORD: '' +EMAIL_HOST_USER: '' +EMAIL_PORT: 25 +EMAIL_USE_TLS: false +ENABLE_PUBLISHER: false +EXTRA_APPS: +- course_discovery.apps.edx_catalog_extensions +JWT_AUTH: + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + JWT_PUBLIC_SIGNING_JWK_SET: '' +LANGUAGE_CODE: en +MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage + MEDIA_ROOT: /edx/var/discovery/media + MEDIA_URL: /media/ +OPENEXCHANGERATES_API_KEY: '' +PARLER_DEFAULT_LANGUAGE_CODE: en +PARLER_LANGUAGES: + 1: + - code: en + 
default: + fallbacks: + - en + hide_untranslated: 'False' +PLATFORM_NAME: Your Platform Name Here +PUBLISHER_FROM_EMAIL: null +SECRET_KEY: Your secret key here +SESSION_EXPIRE_AT_BROWSER_CLOSE: false +SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 +SOCIAL_AUTH_EDX_OAUTH2_KEY: discovery-sso-key +SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout +SOCIAL_AUTH_EDX_OAUTH2_SECRET: discovery-sso-secret +SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 +STATICFILES_STORAGE: django.contrib.staticfiles.storage.StaticFilesStorage +STATIC_ROOT: /edx/var/discovery/staticfiles +TIME_ZONE: UTC +USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME diff --git a/docker/build/docker-tools/Dockerfile b/docker/build/docker-tools/Dockerfile index 3da3103e38d..0b18391a747 100644 --- a/docker/build/docker-tools/Dockerfile +++ b/docker/build/docker-tools/Dockerfile @@ -8,7 +8,7 @@ # with the currently checked-out configuration repo. FROM edxops/xenial-common:latest -MAINTAINER edxops +LABEL maintainer="edxops" ENV REPO_OWNER=edx diff --git a/docker/build/ecommerce/Dockerfile b/docker/build/ecommerce/Dockerfile index c93e3caf130..866244a9601 100644 --- a/docker/build/ecommerce/Dockerfile +++ b/docker/build/ecommerce/Dockerfile @@ -9,15 +9,17 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" USER root -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +ENTRYPOINT ["/edx/app/ecommerce/devstack.sh"] +CMD ["start"] ADD . 
/edx/app/edx_ansible/edx_ansible WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays COPY docker/build/ecommerce/ansible_overrides.yml / -COPY docker/build/devstack/ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/build/ecommerce/ecommerce.yml /edx/etc/ecommerce.yml ARG OPENEDX_RELEASE=master ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} diff --git a/docker/build/ecommerce/ansible_overrides.yml b/docker/build/ecommerce/ansible_overrides.yml index 88de007aa4c..a762eabcc90 100644 --- a/docker/build/ecommerce/ansible_overrides.yml +++ b/docker/build/ecommerce/ansible_overrides.yml @@ -22,6 +22,7 @@ ECOMMERCE_MEMCACHE: ['edx.devstack.memcached:11211'] ECOMMERCE_ECOMMERCE_URL_ROOT: 'http://localhost:18130' ECOMMERCE_LMS_URL_ROOT: 'http://edx.devstack.lms:18000' ECOMMERCE_DISCOVERY_SERVICE_URL: 'http://edx.devstack.discovery:18381' +ECOMMERCE_USE_PYTHON3: true edx_django_service_is_devstack: true diff --git a/docker/build/ecommerce/ecommerce.yml b/docker/build/ecommerce/ecommerce.yml new file mode 100644 index 00000000000..ce3a96682dc --- /dev/null +++ b/docker/build/ecommerce/ecommerce.yml @@ -0,0 +1,130 @@ +--- + +AFFILIATE_COOKIE_KEY: dev_affiliate_id +API_ROOT: null +BACKEND_SERVICE_EDX_OAUTH2_KEY: ecommerce-backend-service-key +BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://localhost:18000/oauth2 +BACKEND_SERVICE_EDX_OAUTH2_SECRET: ecommerce-backend-service-secret +BROKER_URL: amqp://celery:celery@172.17.0.2:5672 +CACHES: + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_PREFIX: ecommerce + LOCATION: + - edx.devstack.memcached:11211 +COMPREHENSIVE_THEME_DIRS: +- /edx/var/edx-themes/edx-themes/ecommerce +- /edx/app/ecommerce/ecommerce/ecommerce/themes +CORS_ALLOW_CREDENTIALS: false +CORS_ORIGIN_WHITELIST: [] +CORS_URLS_REGEX: '' +CSRF_COOKIE_SECURE: false +DATABASES: + default: + ATOMIC_REQUESTS: true + CONN_MAX_AGE: 60 + ENGINE: 
django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: ecommerce + OPTIONS: + connect_timeout: 10 + init_command: SET sql_mode='STRICT_TRANS_TABLES' + PASSWORD: password + PORT: '3306' + USER: ecomm001 +DEFAULT_SITE_THEME: null +ECOMMERCE_URL_ROOT: http://localhost:18130 +EDX_API_KEY: PUT_YOUR_API_KEY_HERE +EDX_DRF_EXTENSIONS: + JWT_PAYLOAD_MERGEABLE_USER_ATTRIBUTES: + - tracking_context + JWT_PAYLOAD_USER_ATTRIBUTE_MAPPING: + administrator: is_staff + email: email + full_name: full_name + tracking_context: tracking_context + user_id: lms_user_id + OAUTH2_USER_INFO_URL: http://edx.devstack.lms:18000/oauth2/user_info +ENABLE_COMPREHENSIVE_THEMING: false +ENROLLMENT_FULFILLMENT_TIMEOUT: 7 +ENTERPRISE_SERVICE_URL: http://edx.devstack.lms:18000/enterprise/ +ENTERPRISE_LEARNER_PORTAL_HOSTNAME: localhost:8734 +EXTRA_APPS: [] +JWT_AUTH: + JWT_ALGORITHM: HS256 + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_DECODE_HANDLER: ecommerce.extensions.api.handlers.jwt_decode_handler + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + - AUDIENCE: lms-key + ISSUER: ecommerce_worker + SECRET_KEY: lms-secret + JWT_LEEWAY: 1 + JWT_PUBLIC_SIGNING_JWK_SET: '' + JWT_SECRET_KEY: lms-secret + JWT_VERIFY_EXPIRATION: true +LANGUAGE_CODE: en +LANGUAGE_COOKIE_NAME: openedx-language-preference +LOGGING_ROOT_OVERRIDES: {} +LOGGING_SUBSECTION_OVERRIDES: {} +MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage + MEDIA_ROOT: /edx/var/ecommerce/media + MEDIA_URL: /media/ +OSCAR_FROM_EMAIL: oscar@example.com +PAYMENT_MICROFRONTEND_URL: null +PAYMENT_PROCESSOR_CONFIG: + edx: + cybersource: + access_key: SET-ME-PLEASE + apple_pay_country_code: US + apple_pay_merchant_id_certificate_path: /edx/etc/ssl/apple_pay_merchant.pem + apple_pay_merchant_id_domain_association: 'This value should also be in + private configuration. 
It, too, + + will span multiple lines. + + ' + apple_pay_merchant_identifier: merchant.com.example + cancel_page_url: /checkout/cancel-checkout/ + merchant_id: SET-ME-PLEASE + payment_page_url: https://testsecureacceptance.cybersource.com/pay + profile_id: SET-ME-PLEASE + receipt_page_url: /checkout/receipt/ + secret_key: SET-ME-PLEASE + send_level_2_3_details: true + soap_api_url: https://ics2wstest.ic3.com/commerce/1.x/transactionProcessor/CyberSourceTransaction_1.140.wsdl + sop_access_key: SET-ME-PLEASE + sop_payment_page_url: https://testsecureacceptance.cybersource.com/silent/pay + sop_profile_id: SET-ME-PLEASE + sop_secret_key: SET-ME-PLEASE + transaction_key: SET-ME-PLEASE + paypal: + cancel_url: /checkout/cancel-checkout/ + client_id: SET-ME-PLEASE + client_secret: SET-ME-PLEASE + error_url: /checkout/error/ + mode: sandbox + receipt_url: /checkout/receipt/ +PLATFORM_NAME: Your Platform Name Here +SAILTHRU_KEY: sailthru key here +SAILTHRU_SECRET: sailthru secret here +SECRET_KEY: Your secret key here +SESSION_COOKIE_SECURE: true +SESSION_EXPIRE_AT_BROWSER_CLOSE: false +SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 +SOCIAL_AUTH_EDX_OAUTH2_KEY: ecommerce-sso-key +SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout +SOCIAL_AUTH_EDX_OAUTH2_SECRET: ecommerce-sso-secret +SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 +SOCIAL_AUTH_REDIRECT_IS_HTTPS: false +STATICFILES_STORAGE: ecommerce.theming.storage.ThemeStorage +STATIC_ROOT: /edx/var/ecommerce/staticfiles +THEME_SCSS: sass/themes/default.scss +TIME_ZONE: UTC +USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME +SDN_CHECK_API_URL: https://api.trade.gov/gateway/v1/consolidated_screening_list/search +SDN_CHECK_API_KEY: sdn search key here diff --git a/docker/build/ecomworker/Dockerfile b/docker/build/ecomworker/Dockerfile index 2a2286c46d8..51e327f0fe8 100644 --- a/docker/build/ecomworker/Dockerfile +++ b/docker/build/ecomworker/Dockerfile @@ -8,17 +8,20 @@ # with the 
currently checked-out configuration repo. FROM edxops/xenial-common:latest -MAINTAINER edxops +LABEL maintainer="edxops" ADD . /edx/app/edx_ansible/edx_ansible WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays COPY docker/build/ecomworker/ansible_overrides.yml / +COPY docker/build/ecomworker/ecomworker.yml /edx/etc/ecomworker.yml + RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook ecomworker.yml \ -c local -i '127.0.0.1,' \ -t "install:base,install:system-requirements,install:configuration,install:app-requirements,install:code" \ --extra-vars="@/ansible_overrides.yml" -USER root -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +USER root +ENTRYPOINT ["/edx/app/ecomworker/devstack.sh"] +CMD ["start"] diff --git a/docker/build/ecomworker/ansible_overrides.yml b/docker/build/ecomworker/ansible_overrides.yml index 03dea2a3f78..9ab2431b912 100644 --- a/docker/build/ecomworker/ansible_overrides.yml +++ b/docker/build/ecomworker/ansible_overrides.yml @@ -1,3 +1,4 @@ --- DOCKER_TLD: "edx" +devstack: true diff --git a/docker/build/ecomworker/ecomworker.yml b/docker/build/ecomworker/ecomworker.yml new file mode 100644 index 00000000000..49c40e8a34d --- /dev/null +++ b/docker/build/ecomworker/ecomworker.yml @@ -0,0 +1,21 @@ +--- + +BROKER_URL: amqp://celery:celery@172.17.0.2:5672 +ECOMMERCE_API_ROOT: http://127.0.0.1:8002/api/v2/ +JWT_ISSUER: ecommerce_worker +JWT_SECRET_KEY: insecure-secret-key +MAX_FULFILLMENT_RETRIES: 11 +SAILTHRU: + SAILTHRU_ABANDONED_CART_DELAY: 60 + SAILTHRU_ABANDONED_CART_TEMPLATE: null + SAILTHRU_CACHE_TTL_SECONDS: 3600 + SAILTHRU_ENABLE: false + SAILTHRU_ENROLL_TEMPLATE: null + SAILTHRU_KEY: sailthru key here + SAILTHRU_MINIMUM_COST: 100 + SAILTHRU_PURCHASE_TEMPLATE: null + SAILTHRU_RETRY_ATTEMPTS: 6 + SAILTHRU_RETRY_SECONDS: 3600 + SAILTHRU_SECRET: sailthru secret here + SAILTHRU_UPGRADE_TEMPLATE: null +SITE_OVERRIDES: null diff --git 
a/docker/build/edxapp/Dockerfile b/docker/build/edxapp/Dockerfile index 401f00cabe1..4f298d32181 100644 --- a/docker/build/edxapp/Dockerfile +++ b/docker/build/edxapp/Dockerfile @@ -9,16 +9,19 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" USER root -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +ENTRYPOINT ["/edx/app/edxapp/devstack.sh"] +CMD ["start"] ADD . /edx/app/edx_ansible/edx_ansible WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays COPY docker/build/edxapp/ansible_overrides.yml / COPY docker/build/edxapp/devstack.yml / -COPY docker/build/devstack/ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/build/edxapp/studio.yml /edx/etc/studio.yml +COPY docker/build/edxapp/lms.yml /edx/etc/lms.yml ARG OPENEDX_RELEASE=appsembler/tahoe/develop ARG APPSEMBLER_PLATFORM_REPO=https://github.com/appsembler/edx-platform.git diff --git a/docker/build/edxapp/ansible_overrides.yml b/docker/build/edxapp/ansible_overrides.yml index 1091e3c2cbe..cfa45509fe5 100644 --- a/docker/build/edxapp/ansible_overrides.yml +++ b/docker/build/edxapp/ansible_overrides.yml @@ -14,18 +14,17 @@ EDXAPP_LMS_GUNICORN_EXTRA_CONF: 'reload = True' EDXAPP_NO_PREREQ_INSTALL: 0 COMMON_SSH_PASSWORD_AUTH: "yes" -EDXAPP_LMS_BASE: "edx.devstack.lms:18000" EDXAPP_CMS_BASE: "edx.devstack.studio:18010" EDXAPP_OAUTH_ENFORCE_SECURE: false EDXAPP_LMS_BASE_SCHEME: http COMMON_SECURITY_UPDATES: true SECURITY_UPGRADE_ON_ANSIBLE: true -EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: true +EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: false EDXAPP_SEARCH_HOST: 'edx.devstack.elasticsearch' -EDXAPP_PYTHON_SANDBOX: false +EDXAPP_EDXAPP_SECRET_KEY: 'DUMMY KEY ONLY FOR TO DEVSTACK' EDXAPP_EDXAPP_SECRET_KEY: 'DUMMY KEY ONLY FOR TO DEVSTACK' diff --git a/docker/build/edxapp/lms.yml 
b/docker/build/edxapp/lms.yml new file mode 100644 index 00000000000..53071fd623c --- /dev/null +++ b/docker/build/edxapp/lms.yml @@ -0,0 +1,596 @@ +ACCOUNT_MICROFRONTEND_URL: null +ACE_CHANNEL_DEFAULT_EMAIL: django_email +ACE_CHANNEL_SAILTHRU_API_KEY: '' +ACE_CHANNEL_SAILTHRU_API_SECRET: '' +ACE_CHANNEL_SAILTHRU_DEBUG: true +ACE_CHANNEL_SAILTHRU_TEMPLATE_NAME: null +ACE_CHANNEL_TRANSACTIONAL_EMAIL: django_email +ACE_ENABLED_CHANNELS: +- django_email +ACE_ENABLED_POLICIES: +- bulk_email_optout +ACE_ROUTING_KEY: edx.lms.core.default +ACTIVATION_EMAIL_SUPPORT_LINK: '' +AFFILIATE_COOKIE_NAME: dev_affiliate_id +ALTERNATE_WORKER_QUEUES: cms +ANALYTICS_API_KEY: '' +ANALYTICS_API_URL: http://localhost:18100 +ANALYTICS_DASHBOARD_NAME: Your Platform Name Here Insights +ANALYTICS_DASHBOARD_URL: http://localhost:18110/courses +API_ACCESS_FROM_EMAIL: api-requests@example.com +API_ACCESS_MANAGER_EMAIL: api-access@example.com +API_DOCUMENTATION_URL: http://course-catalog-api-guide.readthedocs.io/en/latest/ +AUDIT_CERT_CUTOFF_DATE: null +AUTH_DOCUMENTATION_URL: http://course-catalog-api-guide.readthedocs.io/en/latest/authentication/index.html +AUTH_PASSWORD_VALIDATORS: +- NAME: django.contrib.auth.password_validation.UserAttributeSimilarityValidator +- NAME: util.password_policy_validators.MinimumLengthValidator + OPTIONS: + min_length: 2 +- NAME: util.password_policy_validators.MaximumLengthValidator + OPTIONS: + max_length: 75 +AWS_ACCESS_KEY_ID: null +AWS_QUERYSTRING_AUTH: false +AWS_S3_CUSTOM_DOMAIN: SET-ME-PLEASE (ex. bucket-name.s3.amazonaws.com) +AWS_SECRET_ACCESS_KEY: null +AWS_SES_REGION_ENDPOINT: email.us-east-1.amazonaws.com +AWS_SES_REGION_NAME: us-east-1 +AWS_STORAGE_BUCKET_NAME: SET-ME-PLEASE (ex. 
bucket-name) +BASE_COOKIE_DOMAIN: localhost +BLOCKSTORE_API_URL: http://localhost:18250/api/v1 +BLOCKSTORE_PUBLIC_URL_ROOT: http://localhost:18250 +BLOCK_STRUCTURES_SETTINGS: + COURSE_PUBLISH_TASK_DELAY: 30 + PRUNING_ACTIVE: false + TASK_DEFAULT_RETRY_DELAY: 30 + TASK_MAX_RETRIES: 5 +BRANCH_IO_KEY: '' +BUGS_EMAIL: bugs@example.com +BULK_EMAIL_DEFAULT_FROM_EMAIL: no-reply@example.com +BULK_EMAIL_EMAILS_PER_TASK: 500 +BULK_EMAIL_LOG_SENT_EMAILS: false +BULK_EMAIL_ROUTING_KEY_SMALL_JOBS: edx.lms.core.default +CACHES: + celery: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: celery + LOCATION: + - edx.devstack.memcached:11211 + TIMEOUT: '7200' + configuration: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: 1001c6274ca4 + LOCATION: + - edx.devstack.memcached:11211 + course_structure_cache: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: course_structure + LOCATION: + - edx.devstack.memcached:11211 + TIMEOUT: '7200' + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: default + LOCATION: + - edx.devstack.memcached:11211 + VERSION: '1' + general: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: general + LOCATION: + - edx.devstack.memcached:11211 + mongo_metadata_inheritance: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: mongo_metadata_inheritance + LOCATION: + - edx.devstack.memcached:11211 + TIMEOUT: 300 + staticfiles: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: 1001c6274ca4_general + LOCATION: + - edx.devstack.memcached:11211 +CAS_ATTRIBUTE_CALLBACK: '' +CAS_EXTRA_LOGIN_PARAMS: '' 
+CAS_SERVER_URL: '' +CC_PROCESSOR: + CyberSource: + MERCHANT_ID: '' + ORDERPAGE_VERSION: '7' + PURCHASE_ENDPOINT: '' + SERIAL_NUMBER: '' + SHARED_SECRET: '' + CyberSource2: + ACCESS_KEY: '' + PROFILE_ID: '' + PURCHASE_ENDPOINT: '' + SECRET_KEY: '' +CC_PROCESSOR_NAME: CyberSource2 +CELERY_BROKER_HOSTNAME: localhost +CELERY_BROKER_PASSWORD: celery +CELERY_BROKER_TRANSPORT: amqp +CELERY_BROKER_USER: celery +CELERY_BROKER_USE_SSL: false +CELERY_BROKER_VHOST: '' +CELERY_EVENT_QUEUE_TTL: null +CELERY_QUEUES: +- edx.lms.core.default +- edx.lms.core.high +- edx.lms.core.high_mem +CELERY_TIMEZONE: UTC +CERTIFICATE_TEMPLATE_LANGUAGES: + en: English + es: Español +CERT_QUEUE: certificates +CMS_BASE: edx.devstack.studio:18010 +CODE_JAIL: + limits: + CPU: 1 + FSIZE: 1048576 + PROXY: 0 + REALTIME: 3 + VMEM: 536870912 + python_bin: '' + user: sandbox +COMMENTS_SERVICE_KEY: password +COMMENTS_SERVICE_URL: http://localhost:18080 +COMPREHENSIVE_THEME_DIRS: +- '' +COMPREHENSIVE_THEME_LOCALE_PATHS: [] +CONTACT_EMAIL: info@example.com +CONTACT_MAILING_ADDRESS: SET-ME-PLEASE +CONTENTSTORE: + ADDITIONAL_OPTIONS: {} + DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: SECONDARY_PREFERRED + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp + ENGINE: xmodule.contentstore.mongo.MongoContentStore + OPTIONS: + auth_source: null + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + ssl: false + user: edxapp +CORS_ORIGIN_ALLOW_ALL: false +CORS_ORIGIN_WHITELIST: [] +COURSES_WITH_UNSAFE_CODE: [] +COURSE_ABOUT_VISIBILITY_PERMISSION: see_exists +COURSE_CATALOG_API_URL: http://localhost:8008/api/v1 +COURSE_CATALOG_VISIBILITY_PERMISSION: see_exists +CREDENTIALS_INTERNAL_SERVICE_URL: http://localhost:8005 +CREDENTIALS_PUBLIC_SERVICE_URL: http://localhost:8005 +CREDIT_HELP_LINK_URL: '' +CREDIT_PROVIDER_SECRET_KEYS: {} 
+CROSS_DOMAIN_CSRF_COOKIE_DOMAIN: '' +CROSS_DOMAIN_CSRF_COOKIE_NAME: '' +CSRF_COOKIE_SECURE: false +CSRF_TRUSTED_ORIGINS: [] +DATABASES: + default: + ATOMIC_REQUESTS: true + CONN_MAX_AGE: 0 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: edxapp + OPTIONS: {} + PASSWORD: password + PORT: '3306' + USER: edxapp001 + read_replica: + CONN_MAX_AGE: 0 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: edxapp + OPTIONS: {} + PASSWORD: password + PORT: '3306' + USER: edxapp001 + student_module_history: + CONN_MAX_AGE: 0 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: edxapp_csmh + OPTIONS: {} + PASSWORD: password + PORT: '3306' + USER: edxapp001 +DATA_DIR: /edx/var/edxapp +DEFAULT_COURSE_VISIBILITY_IN_CATALOG: both +DEFAULT_FEEDBACK_EMAIL: feedback@example.com +DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage +DEFAULT_FROM_EMAIL: registration@example.com +DEFAULT_JWT_ISSUER: + AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret +DEFAULT_MOBILE_AVAILABLE: false +DEFAULT_SITE_THEME: '' +DEPRECATED_ADVANCED_COMPONENT_TYPES: [] +DJFS: + directory_root: /edx/var/edxapp/django-pyfs/static/django-pyfs + type: osfs + url_root: /static/django-pyfs +DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: SECONDARY_PREFERRED + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp +ECOMMERCE_API_SIGNING_KEY: lms-secret +ECOMMERCE_API_URL: http://localhost:8002/api/v2 +ECOMMERCE_PUBLIC_URL_ROOT: http://localhost:8002 +EDXMKTG_USER_INFO_COOKIE_NAME: edx-user-info +EDXNOTES_INTERNAL_API: http://edx.devstack.edx_notes_api:18120/api/v1 +EDXNOTES_PUBLIC_API: http://localhost:18120/api/v1 +EDX_API_KEY: PUT_YOUR_API_KEY_HERE +EDX_PLATFORM_REVISION: master +ELASTIC_SEARCH_CONFIG: +- host: edx.devstack.elasticsearch + port: 9200 + use_ssl: false 
+EMAIL_BACKEND: django.core.mail.backends.smtp.EmailBackend +EMAIL_HOST: localhost +EMAIL_HOST_PASSWORD: '' +EMAIL_HOST_USER: '' +EMAIL_PORT: 25 +EMAIL_USE_TLS: false +ENABLE_COMPREHENSIVE_THEMING: false +ENTERPRISE_API_URL: http://edx.devstack.lms:18000/enterprise/api/v1 +ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES: +- audit +- honor +ENTERPRISE_CUSTOMER_SUCCESS_EMAIL: customersuccess@edx.org +ENTERPRISE_ENROLLMENT_API_URL: http://edx.devstack.lms:18000/api/enrollment/v1/ +ENTERPRISE_INTEGRATIONS_EMAIL: enterprise-integrations@edx.org +ENTERPRISE_LEARNER_PORTAL_HOSTNAME: localhost:8734 +ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS: {} +ENTERPRISE_SERVICE_WORKER_USERNAME: enterprise_worker +ENTERPRISE_SUPPORT_URL: '' +ENTERPRISE_TAGLINE: '' +EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST: [] +EXTRA_MIDDLEWARE_CLASSES: [] +FACEBOOK_API_VERSION: v2.1 +FACEBOOK_APP_ID: FACEBOOK_APP_ID +FACEBOOK_APP_SECRET: FACEBOOK_APP_SECRET +FEATURES: + AUTOMATIC_AUTH_FOR_TESTING: false + CUSTOM_COURSES_EDX: false + ENABLE_COMBINED_LOGIN_REGISTRATION: true + ENABLE_CORS_HEADERS: false + ENABLE_COUNTRY_ACCESS: false + ENABLE_CREDIT_API: false + ENABLE_CREDIT_ELIGIBILITY: false + ENABLE_CROSS_DOMAIN_CSRF_COOKIE: false + ENABLE_CSMH_EXTENDED: true + ENABLE_DISCUSSION_HOME_PANEL: true + ENABLE_DISCUSSION_SERVICE: true + ENABLE_EDXNOTES: true + ENABLE_ENROLLMENT_RESET: false + ENABLE_GRADE_DOWNLOADS: true + ENABLE_INSTRUCTOR_ANALYTICS: false + ENABLE_MKTG_SITE: false + ENABLE_MOBILE_REST_API: false + ENABLE_OAUTH2_PROVIDER: false + ENABLE_PUBLISHER: false + ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES: true + ENABLE_SPECIAL_EXAMS: false + ENABLE_SYSADMIN_DASHBOARD: false + ENABLE_THIRD_PARTY_AUTH: true + ENABLE_VIDEO_UPLOAD_PIPELINE: false + PREVIEW_LMS_BASE: preview.localhost:18000 + SHOW_FOOTER_LANGUAGE_SELECTOR: false + SHOW_HEADER_LANGUAGE_SELECTOR: false +FEEDBACK_SUBMISSION_EMAIL: '' +FERNET_KEYS: +- DUMMY KEY CHANGE BEFORE GOING TO PRODUCTION +FILE_UPLOAD_STORAGE_BUCKET_NAME: SET-ME-PLEASE (ex. 
bucket-name) +FILE_UPLOAD_STORAGE_PREFIX: submissions_attachments +FINANCIAL_REPORTS: + BUCKET: null + ROOT_PATH: sandbox + STORAGE_TYPE: localfs +FOOTER_ORGANIZATION_IMAGE: images/logo.png +GITHUB_REPO_ROOT: /edx/var/edxapp/data +GIT_REPO_DIR: /edx/var/edxapp/course_repos +GOOGLE_ANALYTICS_ACCOUNT: null +GOOGLE_ANALYTICS_LINKEDIN: '' +GOOGLE_ANALYTICS_TRACKING_ID: '' +GOOGLE_SITE_VERIFICATION_ID: '' +GRADES_DOWNLOAD: + BUCKET: '' + ROOT_PATH: '' + STORAGE_CLASS: django.core.files.storage.FileSystemStorage + STORAGE_KWARGS: + location: /tmp/edx-s3/grades + STORAGE_TYPE: '' +HELP_TOKENS_BOOKS: + course_author: http://edx.readthedocs.io/projects/open-edx-building-and-running-a-course + learner: http://edx.readthedocs.io/projects/open-edx-learner-guide +ICP_LICENSE: null +ICP_LICENSE_INFO: {} +IDA_LOGOUT_URI_LIST: [] +ID_VERIFICATION_SUPPORT_LINK: '' +JWT_AUTH: + JWT_AUDIENCE: lms-key + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUER: http://localhost:18000/oauth2 + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + JWT_PRIVATE_SIGNING_JWK: None + JWT_PUBLIC_SIGNING_JWK_SET: '' + JWT_SECRET_KEY: lms-secret + JWT_SIGNING_ALGORITHM: null +JWT_EXPIRATION: 30 +JWT_ISSUER: http://localhost:18000/oauth2 +JWT_PRIVATE_SIGNING_KEY: null +LANGUAGE_CODE: en +LANGUAGE_COOKIE: openedx-language-preference +LEARNER_PORTAL_URL_ROOT: https://learner-portal-edx.devstack.lms:18000 +LMS_BASE: edx.devstack.lms:18000 +LMS_INTERNAL_ROOT_URL: http://edx.devstack.lms:18000 +LMS_ROOT_URL: http://edx.devstack.lms:18000 +LOCAL_LOGLEVEL: INFO +LOGGING_ENV: sandbox +LOGIN_REDIRECT_WHITELIST: [] +LOG_DIR: /edx/var/log/edx +LTI_AGGREGATE_SCORE_PASSBACK_DELAY: 900 +LTI_USER_EMAIL_DOMAIN: lti.example.com +MAILCHIMP_NEW_USER_LIST_ID: null +MAINTENANCE_BANNER_TEXT: Sample banner message +MEDIA_ROOT: /edx/var/edxapp/media/ +MEDIA_URL: /media/ +MICROSITE_CONFIGURATION: {} 
+MICROSITE_ROOT_DIR: /edx/app/edxapp/edx-microsite +MKTG_URLS: {} +MKTG_URL_LINK_MAP: {} +MOBILE_STORE_URLS: {} +MODULESTORE: + default: + ENGINE: xmodule.modulestore.mixed.MixedModuleStore + OPTIONS: + mappings: {} + stores: + - DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: SECONDARY_PREFERRED + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp + ENGINE: xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore + NAME: split + OPTIONS: + default_class: xmodule.hidden_module.HiddenDescriptor + fs_root: /edx/var/edxapp/data + render_template: edxmako.shortcuts.render_to_string + - DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: PRIMARY + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp + ENGINE: xmodule.modulestore.mongo.DraftMongoModuleStore + NAME: draft + OPTIONS: + default_class: xmodule.hidden_module.HiddenDescriptor + fs_root: /edx/var/edxapp/data + render_template: edxmako.shortcuts.render_to_string +OAUTH_DELETE_EXPIRED: true +OAUTH_ENFORCE_SECURE: false +OAUTH_EXPIRE_CONFIDENTIAL_CLIENT_DAYS: 365 +OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS: 30 +OPTIMIZELY_PROJECT_ID: null +ORA2_FILE_PREFIX: default_env-default_deployment/ora2 +ORDER_HISTORY_MICROFRONTEND_URL: null +PAID_COURSE_REGISTRATION_CURRENCY: +- usd +- $ +PARENTAL_CONSENT_AGE_LIMIT: 13 +PARTNER_SUPPORT_EMAIL: '' +PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG: + ENFORCE_COMPLIANCE_ON_LOGIN: false +PASSWORD_RESET_SUPPORT_LINK: '' +PAYMENT_SUPPORT_EMAIL: billing@example.com +PDF_RECEIPT_BILLING_ADDRESS: 'Enter your receipt billing + + address here. + + ' +PDF_RECEIPT_COBRAND_LOGO_PATH: '' +PDF_RECEIPT_DISCLAIMER_TEXT: 'ENTER YOUR RECEIPT DISCLAIMER TEXT HERE. 
+ + ' +PDF_RECEIPT_FOOTER_TEXT: 'Enter your receipt footer text here. + + ' +PDF_RECEIPT_LOGO_PATH: '' +PDF_RECEIPT_TAX_ID: 00-0000000 +PDF_RECEIPT_TAX_ID_LABEL: fake Tax ID +PDF_RECEIPT_TERMS_AND_CONDITIONS: 'Enter your receipt terms and conditions here. + + ' +PLATFORM_DESCRIPTION: Your Platform Description Here +PLATFORM_FACEBOOK_ACCOUNT: http://www.facebook.com/YourPlatformFacebookAccount +PLATFORM_NAME: Your Platform Name Here +PLATFORM_TWITTER_ACCOUNT: '@YourPlatformTwitterAccount' +POLICY_CHANGE_GRADES_ROUTING_KEY: edx.lms.core.default +PRESS_EMAIL: press@example.com +PROCTORING_BACKENDS: + DEFAULT: 'null' + 'null': {} +PROCTORING_SETTINGS: {} +PROFILE_IMAGE_BACKEND: + class: openedx.core.storage.OverwriteStorage + options: + base_url: /media/profile-images/ + location: /edx/var/edxapp/media/profile-images/ +PROFILE_IMAGE_MAX_BYTES: 1048576 +PROFILE_IMAGE_MIN_BYTES: 100 +PROFILE_IMAGE_SECRET_KEY: placeholder_secret_key +PROFILE_IMAGE_SIZES_MAP: + full: 500 + large: 120 + medium: 50 + small: 30 +PROFILE_MICROFRONTEND_URL: null +PROGRAM_CERTIFICATES_ROUTING_KEY: edx.lms.core.default +PROGRAM_MANAGER_MICROFRONTEND_URL: null +RECALCULATE_GRADES_ROUTING_KEY: edx.lms.core.default +REGISTRATION_EXTRA_FIELDS: + city: hidden + confirm_email: hidden + country: required + gender: optional + goals: optional + honor_code: required + level_of_education: optional + mailing_address: hidden + terms_of_service: hidden + year_of_birth: optional +RETIRED_EMAIL_DOMAIN: retired.invalid +RETIRED_EMAIL_PREFIX: retired__user_ +RETIRED_USERNAME_PREFIX: retired__user_ +RETIRED_USER_SALTS: +- OVERRIDE ME WITH A RANDOM VALUE +- ROTATE SALTS BY APPENDING NEW VALUES +RETIREMENT_SERVICE_WORKER_USERNAME: retirement_worker +RETIREMENT_STATES: +- PENDING +- ERRORED +- ABORTED +- COMPLETE +SECRET_KEY: DUMMY KEY ONLY FOR TO DEVSTACK +SEGMENT_KEY: null +SERVER_EMAIL: devops@example.com +SESSION_COOKIE_DOMAIN: '' +SESSION_COOKIE_NAME: sessionid +SESSION_COOKIE_SECURE: false 
+SESSION_SAVE_EVERY_REQUEST: false +SITE_NAME: localhost +SOCIAL_AUTH_OAUTH_SECRETS: '' +SOCIAL_AUTH_SAML_SP_PRIVATE_KEY: '' +SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT: {} +SOCIAL_AUTH_SAML_SP_PUBLIC_CERT: '' +SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT: {} +SOCIAL_MEDIA_FOOTER_URLS: {} +SOCIAL_SHARING_SETTINGS: + CERTIFICATE_FACEBOOK: false + CERTIFICATE_TWITTER: false + CUSTOM_COURSE_URLS: false + DASHBOARD_FACEBOOK: false + DASHBOARD_TWITTER: false +STATIC_ROOT_BASE: /edx/var/edxapp/staticfiles +STATIC_URL_BASE: /static/ +STUDIO_NAME: Studio +STUDIO_SHORT_NAME: Studio +SUPPORT_SITE_LINK: '' +SWIFT_AUTH_URL: null +SWIFT_AUTH_VERSION: null +SWIFT_KEY: null +SWIFT_REGION_NAME: null +SWIFT_TEMP_URL_DURATION: 1800 +SWIFT_TEMP_URL_KEY: null +SWIFT_TENANT_ID: null +SWIFT_TENANT_NAME: null +SWIFT_USERNAME: null +SWIFT_USE_TEMP_URLS: false +SYSLOG_SERVER: '' +SYSTEM_WIDE_ROLE_CLASSES: [] +TECH_SUPPORT_EMAIL: technical@example.com +TIME_ZONE: America/New_York +TRACKING_SEGMENTIO_WEBHOOK_SECRET: '' +UNIVERSITY_EMAIL: university@example.com +USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME +VERIFY_STUDENT: + DAYS_GOOD_FOR: 365 + EXPIRING_SOON_WINDOW: 28 +VIDEO_CDN_URL: + EXAMPLE_COUNTRY_CODE: http://example.com/edx/video?s3_url= +VIDEO_IMAGE_MAX_AGE: 31536000 +VIDEO_IMAGE_SETTINGS: + DIRECTORY_PREFIX: video-images/ + STORAGE_KWARGS: + base_url: /media/ + location: /edx/var/edxapp/media/ + VIDEO_IMAGE_MAX_BYTES: 2097152 + VIDEO_IMAGE_MIN_BYTES: 2048 +VIDEO_TRANSCRIPTS_MAX_AGE: 31536000 +VIDEO_TRANSCRIPTS_SETTINGS: + DIRECTORY_PREFIX: video-transcripts/ + STORAGE_KWARGS: + base_url: /media/ + location: /edx/var/edxapp/media/ + VIDEO_TRANSCRIPTS_MAX_BYTES: 3145728 +VIDEO_UPLOAD_PIPELINE: + BUCKET: '' + ROOT_PATH: '' +WIKI_ENABLED: true +WRITABLE_GRADEBOOK_URL: null +XBLOCK_FS_STORAGE_BUCKET: null +XBLOCK_FS_STORAGE_PREFIX: null +XBLOCK_SETTINGS: {} +XQUEUE_INTERFACE: + basic_auth: + - edx + - edx + django_auth: + password: password + username: lms + url: 
http://edx.devstack.xqueue:18040 +X_FRAME_OPTIONS: DENY +YOUTUBE_API_KEY: PUT_YOUR_API_KEY_HERE +ZENDESK_API_KEY: '' +ZENDESK_CUSTOM_FIELDS: {} +ZENDESK_GROUP_ID_MAPPING: {} +ZENDESK_OAUTH_ACCESS_TOKEN: '' +ZENDESK_URL: '' +ZENDESK_USER: '' + diff --git a/docker/build/edxapp/studio.yml b/docker/build/edxapp/studio.yml new file mode 100644 index 00000000000..8271854848f --- /dev/null +++ b/docker/build/edxapp/studio.yml @@ -0,0 +1,491 @@ +ACTIVATION_EMAIL_SUPPORT_LINK: '' +AFFILIATE_COOKIE_NAME: dev_affiliate_id +ALTERNATE_WORKER_QUEUES: lms +ANALYTICS_DASHBOARD_NAME: Your Platform Name Here Insights +ANALYTICS_DASHBOARD_URL: http://localhost:18110/courses +AUTH_PASSWORD_VALIDATORS: +- NAME: django.contrib.auth.password_validation.UserAttributeSimilarityValidator +- NAME: util.password_policy_validators.MinimumLengthValidator + OPTIONS: + min_length: 2 +- NAME: util.password_policy_validators.MaximumLengthValidator + OPTIONS: + max_length: 75 +AWS_ACCESS_KEY_ID: null +AWS_QUERYSTRING_AUTH: false +AWS_S3_CUSTOM_DOMAIN: SET-ME-PLEASE (ex. bucket-name.s3.amazonaws.com) +AWS_SECRET_ACCESS_KEY: null +AWS_SES_REGION_ENDPOINT: email.us-east-1.amazonaws.com +AWS_SES_REGION_NAME: us-east-1 +AWS_STORAGE_BUCKET_NAME: SET-ME-PLEASE (ex. 
bucket-name) +BASE_COOKIE_DOMAIN: localhost +BLOCKSTORE_API_URL: http://localhost:18250/api/v1 +BLOCKSTORE_PUBLIC_URL_ROOT: http://localhost:18250 +BLOCK_STRUCTURES_SETTINGS: + COURSE_PUBLISH_TASK_DELAY: 30 + PRUNING_ACTIVE: false + TASK_DEFAULT_RETRY_DELAY: 30 + TASK_MAX_RETRIES: 5 +BRANCH_IO_KEY: '' +BUGS_EMAIL: bugs@example.com +BULK_EMAIL_DEFAULT_FROM_EMAIL: no-reply@example.com +BULK_EMAIL_EMAILS_PER_TASK: 500 +BULK_EMAIL_LOG_SENT_EMAILS: false +CACHES: + celery: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: celery + LOCATION: + - edx.devstack.memcached:11211 + TIMEOUT: '7200' + configuration: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: 1001c6274ca4 + LOCATION: + - edx.devstack.memcached:11211 + course_structure_cache: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: course_structure + LOCATION: + - edx.devstack.memcached:11211 + TIMEOUT: '7200' + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: default + LOCATION: + - edx.devstack.memcached:11211 + VERSION: '1' + general: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: general + LOCATION: + - edx.devstack.memcached:11211 + mongo_metadata_inheritance: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: mongo_metadata_inheritance + LOCATION: + - edx.devstack.memcached:11211 + TIMEOUT: 300 + staticfiles: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_FUNCTION: util.memcache.safe_key + KEY_PREFIX: 1001c6274ca4_general + LOCATION: + - edx.devstack.memcached:11211 +CAS_ATTRIBUTE_CALLBACK: '' +CAS_EXTRA_LOGIN_PARAMS: '' +CAS_SERVER_URL: '' +CELERY_BROKER_HOSTNAME: localhost 
+CELERY_BROKER_PASSWORD: celery +CELERY_BROKER_TRANSPORT: amqp +CELERY_BROKER_USER: celery +CELERY_BROKER_USE_SSL: false +CELERY_BROKER_VHOST: '' +CELERY_EVENT_QUEUE_TTL: null +CELERY_QUEUES: +- edx.cms.core.default +- edx.cms.core.high +CELERY_TIMEZONE: UTC +CERTIFICATE_TEMPLATE_LANGUAGES: + en: English + es: Español +CERT_QUEUE: certificates +CMS_BASE: edx.devstack.studio:18010 +CODE_JAIL: + limits: + CPU: 1 + FSIZE: 1048576 + PROXY: 0 + REALTIME: 3 + VMEM: 536870912 + python_bin: '' + user: sandbox +COMMENTS_SERVICE_KEY: password +COMMENTS_SERVICE_URL: http://localhost:18080 +COMPREHENSIVE_THEME_DIRS: +- '' +COMPREHENSIVE_THEME_LOCALE_PATHS: [] +CONTACT_EMAIL: info@example.com +CONTENTSTORE: + ADDITIONAL_OPTIONS: {} + DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: PRIMARY + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp + ENGINE: xmodule.contentstore.mongo.MongoContentStore + OPTIONS: + auth_source: null + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + ssl: false + user: edxapp +CORS_ORIGIN_ALLOW_ALL: false +CORS_ORIGIN_WHITELIST: [] +COURSES_WITH_UNSAFE_CODE: [] +COURSE_ABOUT_VISIBILITY_PERMISSION: see_exists +COURSE_CATALOG_API_URL: http://localhost:8008/api/v1 +COURSE_CATALOG_VISIBILITY_PERMISSION: see_exists +COURSE_IMPORT_EXPORT_BUCKET: '' +CREDENTIALS_INTERNAL_SERVICE_URL: http://localhost:8005 +CREDENTIALS_PUBLIC_SERVICE_URL: http://localhost:8005 +CREDIT_PROVIDER_SECRET_KEYS: {} +CROSS_DOMAIN_CSRF_COOKIE_DOMAIN: '' +CROSS_DOMAIN_CSRF_COOKIE_NAME: '' +CSRF_COOKIE_SECURE: false +CSRF_TRUSTED_ORIGINS: [] +DATABASES: + default: + ATOMIC_REQUESTS: true + CONN_MAX_AGE: 0 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: edxapp + OPTIONS: {} + PASSWORD: password + PORT: '3306' + USER: edxapp001 + read_replica: + CONN_MAX_AGE: 0 + ENGINE: 
django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: edxapp + OPTIONS: {} + PASSWORD: password + PORT: '3306' + USER: edxapp001 + student_module_history: + CONN_MAX_AGE: 0 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: edxapp_csmh + OPTIONS: {} + PASSWORD: password + PORT: '3306' + USER: edxapp001 +DATA_DIR: /edx/var/edxapp +DEFAULT_COURSE_VISIBILITY_IN_CATALOG: both +DEFAULT_FEEDBACK_EMAIL: feedback@example.com +DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage +DEFAULT_FROM_EMAIL: registration@example.com +DEFAULT_JWT_ISSUER: + AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret +DEFAULT_MOBILE_AVAILABLE: false +DEFAULT_SITE_THEME: '' +DEPRECATED_ADVANCED_COMPONENT_TYPES: [] +DJFS: + directory_root: /edx/var/edxapp/django-pyfs/static/django-pyfs + type: osfs + url_root: /static/django-pyfs +DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: PRIMARY + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp +ECOMMERCE_API_SIGNING_KEY: lms-secret +ECOMMERCE_API_URL: http://localhost:8002/api/v2 +ECOMMERCE_PUBLIC_URL_ROOT: http://localhost:8002 +EDXMKTG_USER_INFO_COOKIE_NAME: edx-user-info +EDX_PLATFORM_REVISION: master +ELASTIC_SEARCH_CONFIG: +- host: edx.devstack.elasticsearch + port: 9200 + use_ssl: false +EMAIL_BACKEND: django.core.mail.backends.smtp.EmailBackend +EMAIL_HOST: localhost +EMAIL_HOST_PASSWORD: '' +EMAIL_HOST_USER: '' +EMAIL_PORT: 25 +EMAIL_USE_TLS: false +ENABLE_COMPREHENSIVE_THEMING: false +ENTERPRISE_API_URL: http://edx.devstack.lms:18000/enterprise/api/v1 +ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS: {} +ENTERPRISE_SERVICE_WORKER_USERNAME: enterprise_worker +EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST: [] +EXTRA_MIDDLEWARE_CLASSES: [] +FACEBOOK_API_VERSION: v2.1 +FACEBOOK_APP_ID: FACEBOOK_APP_ID +FACEBOOK_APP_SECRET: 
FACEBOOK_APP_SECRET +FEATURES: + AUTOMATIC_AUTH_FOR_TESTING: false + CUSTOM_COURSES_EDX: false + ENABLE_COMBINED_LOGIN_REGISTRATION: true + ENABLE_CORS_HEADERS: false + ENABLE_COUNTRY_ACCESS: false + ENABLE_CREDIT_API: false + ENABLE_CREDIT_ELIGIBILITY: false + ENABLE_CROSS_DOMAIN_CSRF_COOKIE: false + ENABLE_CSMH_EXTENDED: true + ENABLE_DISCUSSION_HOME_PANEL: true + ENABLE_DISCUSSION_SERVICE: true + ENABLE_EDXNOTES: true + ENABLE_ENROLLMENT_RESET: false + ENABLE_GRADE_DOWNLOADS: true + ENABLE_INSTRUCTOR_ANALYTICS: false + ENABLE_MKTG_SITE: false + ENABLE_MOBILE_REST_API: false + ENABLE_OAUTH2_PROVIDER: false + ENABLE_PUBLISHER: false + ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES: true + ENABLE_SPECIAL_EXAMS: false + ENABLE_SYSADMIN_DASHBOARD: false + ENABLE_THIRD_PARTY_AUTH: true + ENABLE_VIDEO_UPLOAD_PIPELINE: false + PREVIEW_LMS_BASE: preview.localhost:18000 + SHOW_FOOTER_LANGUAGE_SELECTOR: false + SHOW_HEADER_LANGUAGE_SELECTOR: false +FEEDBACK_SUBMISSION_EMAIL: '' +FERNET_KEYS: +- DUMMY KEY CHANGE BEFORE GOING TO PRODUCTION +FILE_UPLOAD_STORAGE_BUCKET_NAME: SET-ME-PLEASE (ex. 
bucket-name) +FILE_UPLOAD_STORAGE_PREFIX: submissions_attachments +FINANCIAL_REPORTS: + BUCKET: null + ROOT_PATH: sandbox + STORAGE_TYPE: localfs +FOOTER_ORGANIZATION_IMAGE: images/logo.png +GITHUB_REPO_ROOT: /edx/var/edxapp/data +GIT_REPO_EXPORT_DIR: /edx/var/edxapp/export_course_repos +GOOGLE_ANALYTICS_ACCOUNT: null +GRADES_DOWNLOAD: + BUCKET: '' + ROOT_PATH: '' + STORAGE_CLASS: django.core.files.storage.FileSystemStorage + STORAGE_KWARGS: + location: /tmp/edx-s3/grades + STORAGE_TYPE: '' +HELP_TOKENS_BOOKS: + course_author: http://edx.readthedocs.io/projects/open-edx-building-and-running-a-course + learner: http://edx.readthedocs.io/projects/open-edx-learner-guide +ICP_LICENSE: null +ICP_LICENSE_INFO: {} +IDA_LOGOUT_URI_LIST: [] +ID_VERIFICATION_SUPPORT_LINK: '' +JWT_AUTH: + JWT_AUDIENCE: lms-key + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUER: http://localhost:18000/oauth2 + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + JWT_PRIVATE_SIGNING_JWK: None + JWT_PUBLIC_SIGNING_JWK_SET: '' + JWT_SECRET_KEY: lms-secret + JWT_SIGNING_ALGORITHM: null +JWT_EXPIRATION: 30 +JWT_ISSUER: http://localhost:18000/oauth2 +JWT_PRIVATE_SIGNING_KEY: null +LANGUAGE_CODE: en +LANGUAGE_COOKIE: openedx-language-preference +LEARNER_PORTAL_URL_ROOT: https://learner-portal-edx.devstack.lms:18000 +LMS_BASE: edx.devstack.lms:18000 +LMS_INTERNAL_ROOT_URL: http://edx.devstack.lms:18000 +LMS_ROOT_URL: http://edx.devstack.lms:18000 +LOCAL_LOGLEVEL: INFO +LOGGING_ENV: sandbox +LOGIN_REDIRECT_WHITELIST: [] +LOG_DIR: /edx/var/log/edx +MAINTENANCE_BANNER_TEXT: Sample banner message +MEDIA_ROOT: /edx/var/edxapp/media/ +MEDIA_URL: /media/ +MICROSITE_CONFIGURATION: {} +MICROSITE_ROOT_DIR: /edx/app/edxapp/edx-microsite +MKTG_URLS: {} +MKTG_URL_LINK_MAP: {} +MOBILE_STORE_URLS: {} +MODULESTORE: + default: + ENGINE: xmodule.modulestore.mixed.MixedModuleStore + OPTIONS: + 
mappings: {} + stores: + - DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: PRIMARY + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp + ENGINE: xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore + NAME: split + OPTIONS: + default_class: xmodule.hidden_module.HiddenDescriptor + fs_root: /edx/var/edxapp/data + render_template: edxmako.shortcuts.render_to_string + - DOC_STORE_CONFIG: + auth_source: null + collection: modulestore + connectTimeoutMS: 2000 + db: edxapp + host: + - edx.devstack.mongo + password: password + port: 27017 + read_preference: PRIMARY + replicaSet: '' + socketTimeoutMS: 3000 + ssl: false + user: edxapp + ENGINE: xmodule.modulestore.mongo.DraftMongoModuleStore + NAME: draft + OPTIONS: + default_class: xmodule.hidden_module.HiddenDescriptor + fs_root: /edx/var/edxapp/data + render_template: edxmako.shortcuts.render_to_string +ORA2_FILE_PREFIX: default_env-default_deployment/ora2 +PARSE_KEYS: {} +PARTNER_SUPPORT_EMAIL: '' +PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG: + ENFORCE_COMPLIANCE_ON_LOGIN: false +PASSWORD_RESET_SUPPORT_LINK: '' +PAYMENT_SUPPORT_EMAIL: billing@example.com +PLATFORM_DESCRIPTION: Your Platform Description Here +PLATFORM_FACEBOOK_ACCOUNT: http://www.facebook.com/YourPlatformFacebookAccount +PLATFORM_NAME: Your Platform Name Here +PLATFORM_TWITTER_ACCOUNT: '@YourPlatformTwitterAccount' +POLICY_CHANGE_GRADES_ROUTING_KEY: edx.lms.core.default +PRESS_EMAIL: press@example.com +PROCTORING_BACKENDS: + DEFAULT: 'null' + 'null': {} +PROCTORING_SETTINGS: {} +REGISTRATION_EXTRA_FIELDS: + city: hidden + confirm_email: hidden + country: required + gender: optional + goals: optional + honor_code: required + level_of_education: optional + mailing_address: hidden + terms_of_service: hidden + year_of_birth: optional +RETIRED_EMAIL_DOMAIN: retired.invalid 
+RETIRED_EMAIL_PREFIX: retired__user_ +RETIRED_USERNAME_PREFIX: retired__user_ +RETIRED_USER_SALTS: +- OVERRIDE ME WITH A RANDOM VALUE +- ROTATE SALTS BY APPENDING NEW VALUES +RETIREMENT_SERVICE_WORKER_USERNAME: retirement_worker +RETIREMENT_STATES: +- PENDING +- ERRORED +- ABORTED +- COMPLETE +SECRET_KEY: DUMMY KEY ONLY FOR TO DEVSTACK +SEGMENT_KEY: null +SERVER_EMAIL: devops@example.com +SESSION_COOKIE_DOMAIN: '' +SESSION_COOKIE_NAME: sessionid +SESSION_COOKIE_SECURE: false +SESSION_SAVE_EVERY_REQUEST: false +SITE_NAME: localhost +SOCIAL_AUTH_SAML_SP_PRIVATE_KEY: '' +SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT: {} +SOCIAL_AUTH_SAML_SP_PUBLIC_CERT: '' +SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT: {} +SOCIAL_MEDIA_FOOTER_URLS: {} +SOCIAL_SHARING_SETTINGS: + CERTIFICATE_FACEBOOK: false + CERTIFICATE_TWITTER: false + CUSTOM_COURSE_URLS: false + DASHBOARD_FACEBOOK: false + DASHBOARD_TWITTER: false +STATIC_ROOT_BASE: /edx/var/edxapp/staticfiles +STATIC_URL_BASE: /static/ +STUDIO_NAME: Studio +STUDIO_SHORT_NAME: Studio +SUPPORT_SITE_LINK: '' +SWIFT_AUTH_URL: null +SWIFT_AUTH_VERSION: null +SWIFT_KEY: null +SWIFT_REGION_NAME: null +SWIFT_TEMP_URL_DURATION: 1800 +SWIFT_TEMP_URL_KEY: null +SWIFT_TENANT_ID: null +SWIFT_TENANT_NAME: null +SWIFT_USERNAME: null +SWIFT_USE_TEMP_URLS: false +SYSLOG_SERVER: '' +SYSTEM_WIDE_ROLE_CLASSES: [] +TECH_SUPPORT_EMAIL: technical@example.com +TIME_ZONE: America/New_York +UNIVERSITY_EMAIL: university@example.com +USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME +VIDEO_IMAGE_MAX_AGE: 31536000 +VIDEO_IMAGE_SETTINGS: + DIRECTORY_PREFIX: video-images/ + STORAGE_KWARGS: + base_url: /media/ + location: /edx/var/edxapp/media/ + VIDEO_IMAGE_MAX_BYTES: 2097152 + VIDEO_IMAGE_MIN_BYTES: 2048 +VIDEO_TRANSCRIPTS_MAX_AGE: 31536000 +VIDEO_TRANSCRIPTS_SETTINGS: + DIRECTORY_PREFIX: video-transcripts/ + STORAGE_KWARGS: + base_url: /media/ + location: /edx/var/edxapp/media/ + VIDEO_TRANSCRIPTS_MAX_BYTES: 3145728 +VIDEO_UPLOAD_PIPELINE: + BUCKET: '' + 
ROOT_PATH: '' +WIKI_ENABLED: true +XBLOCK_FS_STORAGE_BUCKET: null +XBLOCK_FS_STORAGE_PREFIX: null +XBLOCK_SETTINGS: {} +XQUEUE_INTERFACE: + basic_auth: + - edx + - edx + django_auth: + password: password + username: lms + url: http://edx.devstack.xqueue:18040 +X_FRAME_OPTIONS: DENY +YOUTUBE_API_KEY: PUT_YOUR_API_KEY_HERE +ZENDESK_API_KEY: '' +ZENDESK_CUSTOM_FIELDS: {} +ZENDESK_GROUP_ID_MAPPING: {} +ZENDESK_OAUTH_ACCESS_TOKEN: '' +ZENDESK_URL: '' +ZENDESK_USER: '' + diff --git a/docker/build/elasticsearch-devstack/Dockerfile b/docker/build/elasticsearch-devstack/Dockerfile index 4214de41d05..b851031f026 100644 --- a/docker/build/elasticsearch-devstack/Dockerfile +++ b/docker/build/elasticsearch-devstack/Dockerfile @@ -1,7 +1,7 @@ # docker build -f docker/build/elasticsearch-devstack/Dockerfile . -t edxops/elasticsearch:devstack FROM elasticsearch:1.5.2 -MAINTAINER edxops +LABEL maintainer="edxops" # Install the elastcisearch-head plugin (https://mobz.github.io/elasticsearch-head/) RUN /usr/share/elasticsearch/bin/plugin -install mobz/elasticsearch-head diff --git a/docker/build/elasticsearch/Dockerfile b/docker/build/elasticsearch/Dockerfile index c25c8de44dd..c869014eccb 100644 --- a/docker/build/elasticsearch/Dockerfile +++ b/docker/build/elasticsearch/Dockerfile @@ -1,5 +1,5 @@ FROM edxops/xenial-common:latest -MAINTAINER edxops +LABEL maintainer="edxops" ADD . /edx/app/edx_ansible/edx_ansible WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays diff --git a/docker/build/enterprise_catalog/Dockerfile b/docker/build/enterprise_catalog/Dockerfile new file mode 100644 index 00000000000..b3caf4af29f --- /dev/null +++ b/docker/build/enterprise_catalog/Dockerfile @@ -0,0 +1,32 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/enterprise_catalog/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. 
+ +ARG BASE_IMAGE_TAG=latest +FROM edxops/xenial-common:${BASE_IMAGE_TAG} +MAINTAINER edxops +USER root +CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] + +ADD . /edx/app/edx_ansible/edx_ansible +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/enterprise_catalog/ansible_overrides.yml / +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/build/enterprise_catalog/enterprise_catalog.yml /edx/etc/enterprise_catalog.yml + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook enterprise_catalog.yml \ + -c local -i "127.0.0.1," \ + -t "install,assets,devstack" \ + --extra-vars="ENTERPRISE_CATALOG_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" \ + --extra-vars="@/devstack/ansible_overrides.yml" + +EXPOSE 18160 diff --git a/docker/build/enterprise_catalog/ansible_overrides.yml b/docker/build/enterprise_catalog/ansible_overrides.yml new file mode 100644 index 00000000000..3c1583c7313 --- /dev/null +++ b/docker/build/enterprise_catalog/ansible_overrides.yml @@ -0,0 +1,14 @@ +--- +COMMON_GIT_PATH: 'edx' + +COMMON_MYSQL_MIGRATE_USER: '{{ ENTERPRISE_CATALOG_MYSQL_USER }}' +COMMON_MYSQL_MIGRATE_PASS: '{{ ENTERPRISE_CATALOG_MYSQL_PASSWORD }}' + +ENTERPRISE_CATALOG_MYSQL_HOST: 'edx.devstack.mysql' +ENTERPRISE_CATALOG_DJANGO_SETTINGS_MODULE: 'enterprise_catalog.settings.devstack' +ENTERPRISE_CATALOG_GUNICORN_EXTRA: '--reload' +ENTERPRISE_CATALOG_MEMCACHE: ['edx.devstack.memcached:11211'] +ENTERPRISE_CATALOG_EXTRA_APPS: [] +ENTERPRISE_CATALOG_URL_ROOT: 'http://enterprise-catalog:18160' + +edx_django_service_is_devstack: true diff --git a/docker/build/enterprise_catalog/enterprise_catalog.yml b/docker/build/enterprise_catalog/enterprise_catalog.yml new file mode 100644 index 00000000000..7ba03dd619d --- /dev/null +++ 
b/docker/build/enterprise_catalog/enterprise_catalog.yml @@ -0,0 +1,63 @@ +--- + +API_ROOT: null +BACKEND_SERVICE_EDX_OAUTH2_KEY: enterprise-catalog-backend-service-key +BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://localhost:18000/oauth2 +BACKEND_SERVICE_EDX_OAUTH2_SECRET: enterprise-catalog-backend-service-secret +CACHES: + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_PREFIX: enterprise_catalog + LOCATION: + - edx.devstack.memcached:11211 +CELERY_ALWAYS_EAGER: false +CELERY_BROKER_HOSTNAME: '' +CELERY_BROKER_PASSWORD: '' +CELERY_BROKER_TRANSPORT: '' +CELERY_BROKER_USER: '' +CELERY_BROKER_VHOST: '' +CELERY_DEFAULT_EXCHANGE: enterprise_catalog +CELERY_DEFAULT_QUEUE: enterprise_catalog.default +CELERY_DEFAULT_ROUTING_KEY: enterprise_catalog +CERTIFICATE_LANGUAGES: null +CSRF_COOKIE_SECURE: false +DATABASES: + default: + ATOMIC_REQUESTS: false + CONN_MAX_AGE: 60 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: enterprise_catalog + OPTIONS: + connect_timeout: 10 + init_command: SET sql_mode='STRICT_TRANS_TABLES' + PASSWORD: password + PORT: '3306' + USER: entcatalog001 +EDX_DRF_EXTENSIONS: + OAUTH2_USER_INFO_URL: http://edx.devstack.lms:18000/oauth2/user_info +ENTERPRISE_CATALOG_SERVICE_USER: enterprise_catalog_service_user +EXTRA_APPS: [] +JWT_AUTH: + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_AUTH_REFRESH_COOKIE: edx-jwt-refresh-cookie + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + JWT_PUBLIC_SIGNING_JWK_SET: '' +LANGUAGE_CODE: en +LANGUAGE_COOKIE_NAME: openedx-language-preference +MEDIA_STORAGE_BACKEND: '' +SECRET_KEY: SET-ME-PLEASE +SESSION_EXPIRE_AT_BROWSER_CLOSE: false +SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 +SOCIAL_AUTH_EDX_OAUTH2_KEY: enterprise-catalog-sso-key +SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout +SOCIAL_AUTH_EDX_OAUTH2_SECRET: 
enterprise-catalog-sso-secret +SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 +SOCIAL_AUTH_REDIRECT_IS_HTTPS: false +STATICFILES_STORAGE: django.contrib.staticfiles.storage.StaticFilesStorage +STATIC_ROOT: /edx/var/enterprise_catalog/staticfiles +TIME_ZONE: UTC diff --git a/docker/build/firefox/Dockerfile b/docker/build/firefox/Dockerfile index 98e69950842..043a086589d 100644 --- a/docker/build/firefox/Dockerfile +++ b/docker/build/firefox/Dockerfile @@ -1,5 +1,5 @@ -FROM selenium/standalone-firefox-debug:3.4.0-einsteinium -MAINTAINER edxops +FROM selenium/standalone-firefox-debug:3.14.0-arsenic +LABEL maintainer="edxops" USER root diff --git a/docker/build/flower/Dockerfile b/docker/build/flower/Dockerfile new file mode 100644 index 00000000000..a5c5168d762 --- /dev/null +++ b/docker/build/flower/Dockerfile @@ -0,0 +1,25 @@ +FROM ubuntu:xenial + +# Update and get pip. +RUN apt-get update && apt-get install -y python3-pip + +# Install the required packages +RUN pip3 install --no-cache-dir redis==2.10.6 celery==3.1.18 flower==0.9.2 + +# PYTHONUNBUFFERED: Force stdin, stdout and stderr to be totally unbuffered. (equivalent to `python -u`) +# PYTHONHASHSEED: Enable hash randomization (equivalent to `python -R`) +# PYTHONDONTWRITEBYTECODE: Do not write byte files to disk, since we maintain it as readonly. (equivalent to `python -B`) +ENV PYTHONUNBUFFERED=1 PYTHONHASHSEED=random PYTHONDONTWRITEBYTECODE=1 + +# Default port +EXPOSE 5555 + +RUN apt-get install bash -qy + +# Run as a non-root user by default, run as user with least privileges. +USER nobody + +# Mount a config here if you want to enable OAuth etc +ADD docker/build/flower/flowerconfig.py /flowerconfig.py + +ENTRYPOINT [ "flower" ] diff --git a/docker/build/flower/README.txt b/docker/build/flower/README.txt new file mode 100644 index 00000000000..31be8de0a41 --- /dev/null +++ b/docker/build/flower/README.txt @@ -0,0 +1,9 @@ +Example: +$ docker build . 
-t edxops/flower:latest +$ docker run -it --rm -p 127.0.0.1:5555:5555 edxops/flower:latest flower --broker=redis://:@some-redis-url.com:6379 --conf=flowerconfig.py + +$ curl localhost:5555 + + +Example with oauth: +docker run -it --rm -p 127.0.0.1:5555:5555 -e OAUTH2_KEY="xxxyyy.apps.googleusercontent.com" -e OAUTH2_SECRET="xxxxx" -e OAUTH2_REDIRECT_URI="flower-url.com/login" -e AUTH=".*@domain.org" edxops/flower:latest flower --broker=redis://myuser:mypass@my-redis.com:6379 diff --git a/docker/build/flower/flowerconfig.py b/docker/build/flower/flowerconfig.py new file mode 100644 index 00000000000..40422de8f02 --- /dev/null +++ b/docker/build/flower/flowerconfig.py @@ -0,0 +1,10 @@ + +import os + +address = os.getenv('ADDRESS', "0.0.0.0") +port = os.getenv('PORT', 5555) + +oauth2_key = os.getenv('OAUTH2_KEY', None) +oauth2_secret = os.getenv('OAUTH2_SECRET', None) +oauth2_redirect_uri = os.getenv('OAUTH2_REDIRECT_URI', None) +auth = os.getenv('AUTH', None) diff --git a/docker/build/forum/Dockerfile b/docker/build/forum/Dockerfile index 65037613669..7be8a052c7a 100644 --- a/docker/build/forum/Dockerfile +++ b/docker/build/forum/Dockerfile @@ -9,7 +9,7 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays ADD . 
/edx/app/edx_ansible/edx_ansible @@ -19,9 +19,10 @@ ARG OPENEDX_RELEASE=master ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook forum.yml \ -i '127.0.0.1,' -c local \ - -t "install:base,install:configuration,install:app-requirements,install:code" \ + -t "install:base,install:configuration,install:app-requirements,install:code,devstack:install" \ --extra-vars="forum_version=${OPENEDX_RELEASE}" \ --extra-vars="@/ansible_overrides.yml" WORKDIR /edx/app -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +ENTRYPOINT ["/edx/app/forum/devstack.sh"] +CMD ["start"] EXPOSE 4567 diff --git a/docker/build/go-agent-frontend/Dockerfile b/docker/build/go-agent-frontend/Dockerfile new file mode 100644 index 00000000000..44633218ac6 --- /dev/null +++ b/docker/build/go-agent-frontend/Dockerfile @@ -0,0 +1,12 @@ +FROM edxops/go-agent:latest + +RUN curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash - && \ + apt-get update && apt-get install -y nodejs + +# !!!!NOTICE!!!! ---- Runner of this pipeline take heed!! You must replace go_github_key.pem with the REAL key material +# that can checkout private github repositories used as pipeline materials. The key material here is faked and is only +# used to pass CI! 
+# setup the github identity +ADD docker/build/go-agent/files/go_github_key.pem /home/go/.ssh/id_rsa +RUN chmod 600 /home/go/.ssh/id_rsa && \ + chown go:go /home/go/.ssh/id_rsa diff --git a/docker/build/go-agent-frontend/README.rst b/docker/build/go-agent-frontend/README.rst new file mode 100644 index 00000000000..2842abd3487 --- /dev/null +++ b/docker/build/go-agent-frontend/README.rst @@ -0,0 +1,73 @@ +Usage +##### + +Start the container with this: + +``docker run -d --network=edxgomatic_go-network -e GO_SERVER_URL=https://go-server:8154/go go-agent-frontend:latest`` + +If you need to start a few GoCD agents together, you can of course use the +shell to do that. Start a few agents in the background, like this: + +``for each in 1 2 3; do docker run -d --link angry_feynman:go-server edx/go-agent-frontend; done`` + +Getting into the container +########################## + +Sometimes, you need a shell inside the container (to create test repositories, +etc). docker provides an easy way to do that: + +``docker exec -i -t CONTAINER-ID /bin/bash`` + +To check the agent logs, you can do this: + +``docker exec -i -t CONTAINER-ID tail -f /var/log/go-agent/go-agent.log`` + +Agent Configuration +################### + +The go-agent expects it's configuration to be found at +``/var/lib/go-agent/config/``. Sharing the configuration between containers is +done by mounting a volume at this location that contains any configuration +files necessary. + +**Example docker run command:** +``docker run -ti -v /tmp/go-agent/conf:/var/lib/go-agent/config -e GO_SERVER=gocd.sandbox.edx.org 718d75c467c0 bash`` + +`How to setup auto registration for remote agents`_ + +Building and Uploading the container to ECS +########################################### + +- Build and tag the go-agent docker image + + - Follow the README in the go-agent directory to build and tag for go-agent. 
+ +- Create image + + - This must be run from the root of the configuration repository + - ``docker build -f docker/build/go-agent-frontend/Dockerfile .`` + - or + - ``make docker.test.go-agent-frontend`` + +- Log docker in to AWS + + - Assume the role of the account you wish to log in to + + - ``source assume_role.sh `` + + - ``sh -c `aws ecr get-login --region us-east-1``` + + - You might need to remove the ``-e`` option returned by that command in + order to successfully login. + +- Tag image + + - ``docker tag ############.dkr.ecr.us-east-1.amazonaws.com/prod-tools-goagent-frontend:latest`` + - ``docker tag ############.dkr.ecr.us-east-1.amazonaws.com/prod-tools-goagent-frontend:`` + +- upload: + + - ``docker push ############.dkr.ecr.us-east-1.amazonaws.com/edx/release-pipeline/prod-tools-goagent-frontend:latest`` + - ``docker push ############.dkr.ecr.us-east-1.amazonaws.com/edx/release-pipeline/prod-tools-goagent-frontend:`` + +.. _How to setup auto registration for remote agents: https://docs.go.cd/current/advanced_usage/agent_auto_register.html diff --git a/docker/build/go-agent-marketing/Dockerfile b/docker/build/go-agent-marketing/Dockerfile index 50c2de05b2b..d7a0f5cc78e 100644 --- a/docker/build/go-agent-marketing/Dockerfile +++ b/docker/build/go-agent-marketing/Dockerfile @@ -6,17 +6,17 @@ RUN bash -c '\curl -sSL https://get.rvm.io | bash -s -- --ignore-dotfiles && \ usermod -aG rvm go && source /etc/profile.d/rvm.sh && \ rvm install ruby-2.4.1 && gem install bundler -v 1.16.0' -# Installs node 8.9.3 and npm 5.5.1 as of 12/13/17. Unlikely to change much since node 9 is a stable version for other OS -RUN curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash - && \ +# Installs node 12 and npm 6 as of October 2019. 
+RUN curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash - && \ apt-get update && apt-get install -y nodejs # Install php RUN apt-get update && apt-get install -y \ - php5-common \ - php5-cli + php-common \ + php-cli # Install drush (drupal shell) for access to Drupal commands/Acquia -RUN php -r "readfile('http://files.drush.org/drush.phar');" > drush && \ +RUN php -r "readfile('https://github.com/drush-ops/drush/releases/download/8.2.3/drush.phar');" > drush && \ chmod +x drush && \ sudo mv drush /usr/local/bin diff --git a/docker/build/go-agent/Dockerfile b/docker/build/go-agent/Dockerfile index 9cc7c296762..6c7d936508f 100644 --- a/docker/build/go-agent/Dockerfile +++ b/docker/build/go-agent/Dockerfile @@ -1,6 +1,6 @@ # To run, use the command docker build . -f docker/build/go-agent/Dockerfile --tag edx-ops/go-agent:latest -# https://hub.docker.com/r/gocd/gocd-agent-ubuntu-14.04/ -FROM gocd/gocd-agent-ubuntu-14.04:v17.10.0 +# https://hub.docker.com/r/gocd/gocd-agent-ubuntu-16.04/ +FROM gocd/gocd-agent-ubuntu-16.04:v17.10.0 LABEL version="0.02" \ description="This custom go-agent docker file installs additional requirements for the edx pipeline" @@ -42,9 +42,19 @@ RUN apt-get update && apt-get install -y -q \ # Install dependencies needed for Ansible 2.x RUN apt-get update && apt-get install -y libffi-dev libssl-dev +# Install sudo to execute commands with root privileges +RUN apt-get install -y sudo + +# Install packages needed for Docker installation +RUN apt-get update && apt-get install -y apt-transport-https gnupg-agent + +# Add Docker's GPG key and repository +RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - && \ + add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" + # Install Docker - for Docker container building by a go-agent. 
-COPY docker/build/go-agent/files/docker_install.sh /tmp/docker/ -RUN /bin/bash /tmp/docker/docker_install.sh +RUN apt-get update && apt-get install -y docker-ce # Add the go user to the docker group to allow the go user to run docker commands. # See: https://docs.docker.com/engine/installation/linux/ubuntulinux/ @@ -53,21 +63,32 @@ RUN usermod -aG docker go # Assign the go user root privlidges RUN printf "\ngo ALL=(ALL:ALL) NOPASSWD: /usr/bin/pip, /usr/local/bin/pip\n" >> /etc/sudoers RUN printf "\ngo ALL=(ALL:ALL) NOPASSWD: /usr/bin/pip3, /usr/local/bin/pip3\n" >> /etc/sudoers +RUN printf "\ngo ALL=(ALL:ALL) NOPASSWD: /usr/bin/pip3.6\n" >> /etc/sudoers # Upgrade pip and setup tools. Needed for Ansible 2.x RUN \ - pip install --upgrade pip==9.0.3 && \ - pip3 install --upgrade pip==9.0.3 && \ + pip3 install --upgrade pip==20.0.2 && \ + pip2 install --upgrade pip==20.0.2 && \ #pip may have moved from /usr/bin/ to /usr/local/bin/. This clears bash's path cache. hash -r && \ # upgrade setuptools early to avoid no distribution errors - pip install --upgrade setuptools==39.0.1 && \ - pip3 install --upgrade setuptools==39.0.1 + pip install --upgrade setuptools==44.1.0 && \ + pip3 install --upgrade setuptools==44.1.0 +# Compile python 3.6 for Zappa, as zappa requires python 2.7 or 3.6. +# There is no bionic GOCD official image published to base off of, and there is no official 3.6 python package for ubuntu 14.04 or 16.04 +# This will only be on your path as 'python3.6' it will not change the current 'python3' on your path to python 3.6 +# This isnt a separate agent because deploys of this are likely quite rare. +# If we update the GOCD agent to use ubuntu > 14 we can just use the python3 +# that is already installed for zappa deployments. 
+RUN apt-get update && apt-get install zlib1g-dev wget lsb-core -qy && cd /opt && wget https://www.python.org/ftp/python/3.6.7/Python-3.6.7.tgz && tar -xvf Python-3.6.7.tgz && cd /opt/Python-3.6.7 && ./configure --prefix=/usr && cd /opt/Python-3.6.7 && make && make commoninstall maninstall && rm -rf /opt/Python-3.6.7 # Install AWS command-line interface - for AWS operations in a go-agent task. RUN pip install 'awscli>=1.11.58' +# Install pip on python3.6 to install asym-crypto-yaml which requires python >= 3.6 +RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && python3.6 get-pip.py + # !!!!NOTICE!!!! ---- Runner of this pipeline take heed!! You must replace go_github_key.pem with the REAL key material # that can checkout private github repositories used as pipeline materials. The key material here is faked and is only # used to pass CI! diff --git a/docker/build/go-agent/files/docker_install.sh b/docker/build/go-agent/files/docker_install.sh deleted file mode 100755 index d4c2ef4f17b..00000000000 --- a/docker/build/go-agent/files/docker_install.sh +++ /dev/null @@ -1,506 +0,0 @@ -#!/bin/sh -set -e -# -# This script is meant for quick & easy install via: -# 'curl -sSL https://get.docker.com/ | sh' -# or: -# 'wget -qO- https://get.docker.com/ | sh' -# -# For test builds (ie. 
release candidates): -# 'curl -fsSL https://test.docker.com/ | sh' -# or: -# 'wget -qO- https://test.docker.com/ | sh' -# -# For experimental builds: -# 'curl -fsSL https://experimental.docker.com/ | sh' -# or: -# 'wget -qO- https://experimental.docker.com/ | sh' -# -# Docker Maintainers: -# To update this script on https://get.docker.com, -# use hack/release.sh during a normal release, -# or the following one-liner for script hotfixes: -# aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index -# - -url="https://get.docker.com/" -apt_url="https://apt.dockerproject.org" -yum_url="https://yum.dockerproject.org" -gpg_fingerprint="58118E89F3A912897C070ADBF76221572C52609D" - -key_servers=" -ha.pool.sks-keyservers.net -pgp.mit.edu -keyserver.ubuntu.com -" - -command_exists() { - command -v "$@" > /dev/null 2>&1 -} - -echo_docker_as_nonroot() { - if command_exists docker && [ -e /var/run/docker.sock ]; then - ( - set -x - $sh_c 'docker version' - ) || true - fi - your_user=your-user - [ "$user" != 'root' ] && your_user="$user" - # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output - cat <<-EOF - - If you would like to use Docker as a non-root user, you should now consider - adding your user to the "docker" group with something like: - - sudo usermod -aG docker $your_user - - Remember that you will have to log out and back in for this to take effect! - - EOF -} - -# Check if this is a forked Linux distro -check_forked() { - - # Check for lsb_release command existence, it usually exists in forked distros - if command_exists lsb_release; then - # Check if the `-u` option is supported - set +e - lsb_release -a -u > /dev/null 2>&1 - lsb_release_exit_code=$? - set -e - - # Check if the command has exited successfully, it means we're in a forked distro - if [ "$lsb_release_exit_code" = "0" ]; then - # Print info about current distro - cat <<-EOF - You're using '$lsb_dist' version '$dist_version'. 
- EOF - - # Get the upstream release info - lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]') - dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]') - - # Print info about upstream distro - cat <<-EOF - Upstream release is '$lsb_dist' version '$dist_version'. - EOF - else - if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ]; then - # We're Debian and don't even know it! - lsb_dist=debian - dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" - case "$dist_version" in - 8|'Kali Linux 2') - dist_version="jessie" - ;; - 7) - dist_version="wheezy" - ;; - esac - fi - fi - fi -} - -rpm_import_repository_key() { - local key=$1; shift - local tmpdir=$(mktemp -d) - chmod 600 "$tmpdir" - for key_server in $key_servers ; do - gpg --homedir "$tmpdir" --keyserver "$key_server" --recv-keys "$key" && break - done - gpg --homedir "$tmpdir" -k "$key" >/dev/null - gpg --homedir "$tmpdir" --export --armor "$key" > "$tmpdir"/repo.key - rpm --import "$tmpdir"/repo.key - rm -rf "$tmpdir" -} - -semverParse() { - major="${1%%.*}" - minor="${1#$major.}" - minor="${minor%%.*}" - patch="${1#$major.$minor.}" - patch="${patch%%[-.]*}" -} - -do_install() { - case "$(uname -m)" in - *64) - ;; - *) - cat >&2 <<-'EOF' - Error: you are not using a 64bit platform. - Docker currently only supports 64bit platforms. - EOF - exit 1 - ;; - esac - - if command_exists docker; then - version="$(docker -v | awk -F '[ ,]+' '{ print $3 }')" - MAJOR_W=1 - MINOR_W=10 - - semverParse $version - - shouldWarn=0 - if [ $major -lt $MAJOR_W ]; then - shouldWarn=1 - fi - - if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then - shouldWarn=1 - fi - - cat >&2 <<-'EOF' - Warning: the "docker" command appears to already exist on this system. 
- - If you already have Docker installed, this script can cause trouble, which is - why we're displaying this warning and provide the opportunity to cancel the - installation. - - If you installed the current Docker package using this script and are using it - EOF - - if [ $shouldWarn -eq 1 ]; then - cat >&2 <<-'EOF' - again to update Docker, we urge you to migrate your image store before upgrading - to v1.10+. - - You can find instructions for this here: - https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration - EOF - else - cat >&2 <<-'EOF' - again to update Docker, you can safely ignore this message. - EOF - fi - - cat >&2 <<-'EOF' - - You may press Ctrl+C now to abort this script. - EOF - ( set -x; sleep 20 ) - fi - - user="$(id -un 2>/dev/null || true)" - - sh_c='sh -c' - if [ "$user" != 'root' ]; then - if command_exists sudo; then - sh_c='sudo -E sh -c' - elif command_exists su; then - sh_c='su -c' - else - cat >&2 <<-'EOF' - Error: this installer needs the ability to run commands as root. - We are unable to find either "sudo" or "su" available to make this happen. - EOF - exit 1 - fi - fi - - curl='' - if command_exists curl; then - curl='curl -sSL' - elif command_exists wget; then - curl='wget -qO-' - elif command_exists busybox && busybox --list-modules | grep -q wget; then - curl='busybox wget -qO-' - fi - - # check to see which repo they are trying to install from - if [ -z "$repo" ]; then - repo='main' - if [ "https://test.docker.com/" = "$url" ]; then - repo='testing' - elif [ "https://experimental.docker.com/" = "$url" ]; then - repo='experimental' - fi - fi - - # perform some very rudimentary platform detection - lsb_dist='' - dist_version='' - if command_exists lsb_release; then - lsb_dist="$(lsb_release -si)" - fi - if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then - lsb_dist="$(. 
/etc/lsb-release && echo "$DISTRIB_ID")" - fi - if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then - lsb_dist='debian' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then - lsb_dist='fedora' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then - lsb_dist='oracleserver' - fi - if [ -z "$lsb_dist" ]; then - if [ -r /etc/centos-release ] || [ -r /etc/redhat-release ]; then - lsb_dist='centos' - fi - fi - if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then - lsb_dist="$(. /etc/os-release && echo "$ID")" - fi - - lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" - - case "$lsb_dist" in - - ubuntu) - if command_exists lsb_release; then - dist_version="$(lsb_release --codename | cut -f2)" - fi - if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then - dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")" - fi - ;; - - debian) - dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" - case "$dist_version" in - 8) - dist_version="jessie" - ;; - 7) - dist_version="wheezy" - ;; - esac - ;; - - oracleserver) - # need to switch lsb_dist to match yum repo URL - lsb_dist="oraclelinux" - dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')" - ;; - - fedora|centos) - dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')" - ;; - - *) - if command_exists lsb_release; then - dist_version="$(lsb_release --codename | cut -f2)" - fi - if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then - dist_version="$(. 
/etc/os-release && echo "$VERSION_ID")" - fi - ;; - - - esac - - # Check if this is a forked Linux distro - check_forked - - # Run setup for each distro accordingly - case "$lsb_dist" in - amzn) - ( - set -x - $sh_c 'sleep 3; yum -y -q install docker' - ) - echo_docker_as_nonroot - exit 0 - ;; - - 'opensuse project'|opensuse) - echo 'Going to perform the following operations:' - if [ "$repo" != 'main' ]; then - echo ' * add repository obs://Virtualization:containers' - fi - echo ' * install Docker' - $sh_c 'echo "Press CTRL-C to abort"; sleep 3' - - if [ "$repo" != 'main' ]; then - # install experimental packages from OBS://Virtualization:containers - ( - set -x - zypper -n ar -f obs://Virtualization:containers Virtualization:containers - rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2 - ) - fi - ( - set -x - zypper -n install docker - ) - echo_docker_as_nonroot - exit 0 - ;; - 'suse linux'|sle[sd]) - echo 'Going to perform the following operations:' - if [ "$repo" != 'main' ]; then - echo ' * add repository obs://Virtualization:containers' - echo ' * install experimental Docker using packages NOT supported by SUSE' - else - echo ' * add the "Containers" module' - echo ' * install Docker using packages supported by SUSE' - fi - $sh_c 'echo "Press CTRL-C to abort"; sleep 3' - - if [ "$repo" != 'main' ]; then - # install experimental packages from OBS://Virtualization:containers - echo >&2 'Warning: installing experimental packages from OBS, these packages are NOT supported by SUSE' - ( - set -x - zypper -n ar -f obs://Virtualization:containers/SLE_12 Virtualization:containers - rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2 - ) - else - # Add the containers module - # Note well-1: the SLE machine must already be registered against SUSE Customer Center - # Note well-2: the `-r ""` is required to workaround a known issue of SUSEConnect - ( - set -x - SUSEConnect -p sle-module-containers/12/x86_64 -r "" - ) - fi - ( - set -x - 
zypper -n install docker - ) - echo_docker_as_nonroot - exit 0 - ;; - - ubuntu|debian) - export DEBIAN_FRONTEND=noninteractive - - did_apt_get_update= - apt_get_update() { - if [ -z "$did_apt_get_update" ]; then - ( set -x; $sh_c 'sleep 3; apt-get update' ) - did_apt_get_update=1 - fi - } - - # aufs is preferred over devicemapper; try to ensure the driver is available. - if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then - if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then - kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual" - - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true - - if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then - echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' - echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' - ( set -x; sleep 10 ) - fi - else - echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' - echo >&2 ' package. We have no AUFS support. Consider installing the packages' - echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.' - ( set -x; sleep 10 ) - fi - fi - - # install apparmor utils if they're missing and apparmor is enabled in the kernel - # otherwise Docker will fail to start - if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then - if command -v apparmor_parser >/dev/null 2>&1; then - echo 'apparmor is enabled in the kernel and apparmor utils were already installed' - else - echo 'apparmor is enabled in the kernel, but apparmor_parser missing' - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) - fi - fi - - if [ ! 
-e /usr/lib/apt/methods/https ]; then - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' ) - fi - if [ -z "$curl" ]; then - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' ) - curl='curl -sSL' - fi - ( - set -x - for key_server in $key_servers ; do - $sh_c "apt-key adv --keyserver hkp://${key_server}:80 --recv-keys ${gpg_fingerprint}" && break - done - $sh_c "apt-key adv -k ${gpg_fingerprint} >/dev/null" - $sh_c "mkdir -p /etc/apt/sources.list.d" - $sh_c "echo deb [arch=$(dpkg --print-architecture)] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list" - $sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine' - ) - echo_docker_as_nonroot - exit 0 - ;; - - fedora|centos|oraclelinux) - $sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF - [docker-${repo}-repo] - name=Docker ${repo} Repository - baseurl=${yum_url}/repo/${repo}/${lsb_dist}/${dist_version} - enabled=1 - gpgcheck=1 - gpgkey=${yum_url}/gpg - EOF - if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then - ( - set -x - $sh_c 'sleep 3; dnf -y -q install docker-engine' - ) - else - ( - set -x - $sh_c 'sleep 3; yum -y -q install docker-engine' - ) - fi - echo_docker_as_nonroot - exit 0 - ;; - gentoo) - if [ "$url" = "https://test.docker.com/" ]; then - # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output - cat >&2 <<-'EOF' - - You appear to be trying to install the latest nightly build in Gentoo.' - The portage tree should contain the latest stable release of Docker, but' - if you want something more recent, you can always use the live ebuild' - provided in the "docker" overlay available via layman. 
For more' - instructions, please see the following URL:' - - https://github.com/tianon/docker-overlay#using-this-overlay' - - After adding the "docker" overlay, you should be able to:' - - emerge -av =app-emulation/docker-9999' - - EOF - exit 1 - fi - - ( - set -x - $sh_c 'sleep 3; emerge app-emulation/docker' - ) - exit 0 - ;; - esac - - # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output - cat >&2 <<-'EOF' - - Either your platform is not easily detectable, is not supported by this - installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have - a package for Docker. Please visit the following URL for more detailed - installation instructions: - - https://docs.docker.com/engine/installation/ - - EOF - exit 1 -} - -# wrapped up in a function so that we have some protection against only getting -# half the file during "curl | sh" -do_install diff --git a/docker/build/go-server/Dockerfile b/docker/build/go-server/Dockerfile index 5573ce159db..f325ad22c61 100644 --- a/docker/build/go-server/Dockerfile +++ b/docker/build/go-server/Dockerfile @@ -1,6 +1,6 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" ADD . /edx/app/edx_ansible/edx_ansible WORKDIR /var/lib/go-server diff --git a/docker/build/graphite/Dockerfile b/docker/build/graphite/Dockerfile index 1bd7b78c1d8..33b89792bb0 100644 --- a/docker/build/graphite/Dockerfile +++ b/docker/build/graphite/Dockerfile @@ -1,5 +1,5 @@ FROM edxops/xenial-common:latest -MAINTAINER edxops +LABEL maintainer="edxops" USER root ADD . 
/edx/app/edx_ansible/edx_ansible diff --git a/docker/build/insights/Dockerfile b/docker/build/insights/Dockerfile index cdaf190c133..4b90175c386 100644 --- a/docker/build/insights/Dockerfile +++ b/docker/build/insights/Dockerfile @@ -9,10 +9,12 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" ADD . /edx/app/edx_ansible/edx_ansible COPY docker/build/insights/ansible_overrides.yml / +COPY docker/build/insights/insights.yml /edx/etc/insights.yml + WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays ARG OPENEDX_RELEASE=master @@ -22,5 +24,8 @@ RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook insights.yml \ -t "install:base,install:system-requirements,install:configuration,install:app-requirements,install:code" \ --extra-vars="INSIGHTS_VERSION=${OPENEDX_RELEASE}" \ --extra-vars="@/ansible_overrides.yml" -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +ADD docker/build/insights/devstack.sh /edx/app/insights/devstack.sh +RUN chown insights:insights /edx/app/insights/devstack.sh && chmod a+x /edx/app/insights/devstack.sh +ENTRYPOINT ["/edx/app/insights/devstack.sh"] +CMD ["start"] EXPOSE 8110 18110 diff --git a/docker/build/insights/ansible_overrides.yml b/docker/build/insights/ansible_overrides.yml index 7a988ee948a..480e0b1fd3c 100644 --- a/docker/build/insights/ansible_overrides.yml +++ b/docker/build/insights/ansible_overrides.yml @@ -9,6 +9,8 @@ INSIGHTS_MEMCACHE: - "memcache.{{ DOCKER_TLD }}:11211" ANALYTICS_API_ENDPOINT: "http://analtyicsapi.{{ DOCKER_TLD }}:8100/api/v0" +insights_django_settings: "analytics_dashboard.settings.devstack" + INSIGHTS_DATABASES: # rw user default: @@ -18,3 +20,5 @@ INSIGHTS_DATABASES: PASSWORD: 'secret' HOST: "db.{{ DOCKER_TLD }}" PORT: '3306' + +edx_django_service_is_devstack: true diff --git a/docker/build/insights/devstack.sh b/docker/build/insights/devstack.sh new file 
mode 100644 index 00000000000..46addc019bb --- /dev/null +++ b/docker/build/insights/devstack.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +COMMAND=$1 + +case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; + open) + . /edx/app/insights/venvs/insights/bin/activate + cd /edx/app/insights/insights + + /bin/bash + ;; + exec) + shift + + . /edx/app/insights/venvs/insights/bin/activate + cd /edx/app/insights/insights + + "$@" + ;; + *) + "$@" + ;; +esac diff --git a/docker/build/insights/insights.yml b/docker/build/insights/insights.yml new file mode 100644 index 00000000000..dbe361e8a21 --- /dev/null +++ b/docker/build/insights/insights.yml @@ -0,0 +1,59 @@ +--- + +APPLICATION_NAME: Insights +BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://edx.devstack.lms:18000/oauth2 +CACHES: + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_PREFIX: default_env-default_deployment-insights + LOCATION: + - edx.devstack.memcached:11211 +CDN_DOMAIN: null +CMS_COURSE_SHORTCUT_BASE_URL: http://edx.devstack.lms:18000/course +COURSE_API_URL: http://edx.devstack.lms:18000/api/courses/v1/ +CSRF_COOKIE_NAME: insights_csrftoken +CSRF_COOKIE_SECURE: false +DATABASES: + default: + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: dashboard + PASSWORD: secret + PORT: '3306' + USER: rosencrantz +DATA_API_AUTH_TOKEN: edx +DATA_API_URL: http://edx.devstack.analyticsapi:18100/api/v0 +DOCUMENTATION_LOAD_ERROR_URL: http://127.0.0.1/en/latest/Reference.html#error-conditions +EMAIL_HOST: smtp.example.com +EMAIL_HOST_PASSWORD: mail_password +EMAIL_HOST_USER: mail_user +EMAIL_PORT: 587 +ENABLE_AUTO_AUTH: true +GRADING_POLICY_API_URL: http://edx.devstack.lms:18000/api/grades/v1/ +HELP_URL: http://127.0.0.1/en/latest +LANGUAGE_CODE: en-us +LANGUAGE_COOKIE_NAME: insights_language +LEARNER_API_LIST_DOWNLOAD_FIELDS: null +LMS_COURSE_SHORTCUT_BASE_URL: URL_FOR_LMS_COURSE_LIST_PAGE 
+MODULE_PREVIEW_URL: http://edx.devstack.lms:18000/xblock +OPEN_SOURCE_URL: http://set-me-please +PLATFORM_NAME: edX +PRIVACY_POLICY_URL: http://example.com/privacy-policy +RESEARCH_URL: https://www.edx.org/research-pedagogy +SECRET_KEY: YOUR_SECRET_KEY_HERE +SEGMENT_IGNORE_EMAIL_REGEX: null +SEGMENT_IO_KEY: YOUR_KEY +SESSION_COOKIE_NAME: insights_sessionid +SESSION_EXPIRE_AT_BROWSER_CLOSE: false +SOCIAL_AUTH_REDIRECT_IS_HTTPS: false +SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://localhost:18000 +SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://edx.devstack.lms:18000 +SOCIAL_AUTH_EDX_OAUTH2_PUBLIC_URL_ROOT: http://localhost:18000 +SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout +STATICFILES_DIRS: +- /edx/app/insights/edx_analytics_dashboard/analytics_dashboard/static +STATIC_ROOT: /edx/var/insights/staticfiles +SUPPORT_EMAIL: '' +TERMS_OF_SERVICE_URL: http://example.com/terms-service +TIME_ZONE: UTC + diff --git a/docker/build/jenkins_analytics/Dockerfile.noci b/docker/build/jenkins_analytics/Dockerfile.noci index 522241d752f..a5c4c1fc00b 100644 --- a/docker/build/jenkins_analytics/Dockerfile.noci +++ b/docker/build/jenkins_analytics/Dockerfile.noci @@ -1,5 +1,5 @@ FROM edxops/xenial-common:latest -MAINTAINER edxops +LABEL maintainer="edxops" USER root RUN apt-get update diff --git a/docker/build/jenkins_build/Dockerfile b/docker/build/jenkins_build/Dockerfile index e67f1dfa8ac..016ffe7dee8 100644 --- a/docker/build/jenkins_build/Dockerfile +++ b/docker/build/jenkins_build/Dockerfile @@ -9,7 +9,7 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" USER root RUN apt-get update diff --git a/docker/build/jenkins_build/ansible_overrides.yml b/docker/build/jenkins_build/ansible_overrides.yml index 47a2f7b6cef..5b49ca9470d 100644 --- a/docker/build/jenkins_build/ansible_overrides.yml +++ b/docker/build/jenkins_build/ansible_overrides.yml @@ -6,6 +6,8 @@ build_jenkins_configuration_scripts: - 
1addJarsToClasspath.groovy - 2checkInstalledPlugins.groovy - 3importCredentials.groovy + - 3installGroovy.groovy + - 3installPython.groovy - 3mainConfiguration.groovy - 3setGlobalProperties.groovy - 3shutdownCLI.groovy @@ -14,11 +16,10 @@ build_jenkins_configuration_scripts: - 4configureGHPRB.groovy - 4configureGit.groovy - 4configureGithub.groovy - - 4configureHipChat.groovy - - 4configureJobConfigHistory.groovy - 4configureMailerPlugin.groovy - 4configureMaskPasswords.groovy - 4configureSecurity.groovy + - 5configureEmailExtension.groovy - 5addSeedJob.groovy # added this - 5createLoggers.groovy @@ -26,29 +27,50 @@ build_jenkins_configuration_scripts: jenkins_common_non_plugin_template_files: - credentials - ec2_config + - email_ext_config - ghprb_config - git_config - github_config # - github_oauth # intentionally commented out - - hipchat_config - - job_config_history + - groovy_config - log_config - mailer_config - main_config - mask_passwords_config - properties_config + - python_config - security - seed_config # Add the jenkins-worker label so that this jenkins master will work # out-of-the-box for running most kinds of jobs. This makes integration # testing easier, and is easier for the openedx community. +# Also add the android-worker label so that android testing can be done +# easily on a local dev environment. NOTE: this also requires running +# playbooks/android_sdk.yml in order to have all of the necessary Android +# compilation and testing tools, but isn't necessary for most cases and +# therefore should be omitted from normal builds of this container. jenkins_common_main_labels: - 'dsl-seed-runner' - 'backup-runner' - 'jenkins-worker' # added this + - 'android-worker' # We're running all our jobs on the Jenkins Master by default (one container # only), so we need to bump up the number of executors for some jobs with # downstream jobs to work correctly. 
jenkins_common_main_num_executors: 6 + +# Add basic permissions for a non-authenticated user to be able to view +# the jenkins instance and its jobs. When used for development, an +# initial admin password is generated and will be used for job creation. +# However, in order to get to the login page, these permissions must be +# configured. +JENKINS_SECURITY_GROUPS: + - NAME: 'anonymous' + PERMISSIONS: + - 'hudson.model.Hudson.Read' + - 'hudson.model.Item.Discover' + - 'hudson.model.Item.Read' + USERS: + - 'anonymous' diff --git a/docker/build/jenkins_worker/Dockerfile b/docker/build/jenkins_worker/Dockerfile index 9598e59dc03..225db9f5c94 100644 --- a/docker/build/jenkins_worker/Dockerfile +++ b/docker/build/jenkins_worker/Dockerfile @@ -10,35 +10,24 @@ # Run the edxapp play with custom ansible overrides ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" USER root ADD . /edx/app/edx_ansible/edx_ansible WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays COPY docker/build/jenkins_worker/ansible_overrides.yml /jenkins_worker/ansible_overrides.yml -COPY docker/build/edxapp/devstack.yml / -COPY docker/build/devstack/ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml ARG OPENEDX_RELEASE=master ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook edxapp.yml \ -c local -i '127.0.0.1,' \ - -t 'install,assets,devstack,jenkins-worker' \ + -t 'install,assets,devstack' \ --extra-vars="edx_platform_version=${OPENEDX_RELEASE}" \ --extra-vars="@/jenkins_worker/ansible_overrides.yml" \ - --extra-vars="@/devstack.yml" \ - --extra-vars="@/devstack/ansible_overrides.yml" - -# Run the mongo play -COPY docker/build/mongo/ansible_overrides.yml /mongo/ansible_overrides.yml - -RUN mkdir -p /data/db -WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays -RUN 
/edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook mongo.yml \ - -i '127.0.0.1,' -c local \ - -t 'install' \ - --extra-vars="@/mongo/ansible_overrides.yml" + --extra-vars="@/devstack/ansible_overrides.yml" \ + && rm -rf /edx/app/edxapp/.cache /edx/app/edxapp/edx-platform # Add sshd to enable jenkins master to ssh into containers RUN apt-get update \ @@ -47,11 +36,7 @@ RUN apt-get update \ ARG JENKINS_WORKER_KEY_URL=https://files.edx.org/testeng/jenkins.keys RUN mkdir /var/run/sshd \ - && groupadd ubuntu \ - && useradd -ms /bin/bash ubuntu -g ubuntu -d /home/ubuntu \ - && curl ${JENKINS_WORKER_KEY_URL} --create-dirs -o /home/ubuntu/.ssh/authorized_keys - -RUN chown -R ubuntu /home/ubuntu /edx/app/edxapp/edx-platform -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] + && curl ${JENKINS_WORKER_KEY_URL} --create-dirs -o /edx/app/edxapp/.ssh/authorized_keys +CMD ["/usr/sbin/sshd", "-D"] EXPOSE 22 diff --git a/docker/build/jenkins_worker/ansible_overrides.yml b/docker/build/jenkins_worker/ansible_overrides.yml index 434b76a8ce0..5cd7adb2d96 100644 --- a/docker/build/jenkins_worker/ansible_overrides.yml +++ b/docker/build/jenkins_worker/ansible_overrides.yml @@ -1,24 +1,29 @@ --- -EDXAPP_SETTINGS: 'devstack_docker' - -MONGO_AUTH: false - -devstack: true -migrate_db: false -mongo_enable_journal: false -edxapp_npm_production: "no" - +COMMON_SECURITY_UPDATES: true +EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: true +EDXAPP_LMS_BASE_SCHEME: http EDXAPP_LMS_GUNICORN_EXTRA_CONF: 'reload = True' - EDXAPP_NO_PREREQ_INSTALL: 0 EDXAPP_OAUTH_ENFORCE_SECURE: false -EDXAPP_LMS_BASE_SCHEME: http -COMMON_SECURITY_UPDATES: true -SECURITY_UPGRADE_ON_ANSIBLE: true - -EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: true - EDXAPP_PYTHON_SANDBOX: false +EDXAPP_SETTINGS: 'devstack_docker' +MONGO_AUTH: false +SECURITY_UPGRADE_ON_ANSIBLE: true +devstack: true edxapp_debian_pkgs_extra: - mongodb-clients +edxapp_environment_extra: + 
SELENIUM_BROWSER: 'firefox' + SELENIUM_HOST: 'localhost' + SELENIUM_PORT: '4444' +edxapp_npm_production: 'no' +edxapp_requirements_files: + - "{{ testing_requirements_file }}" +edxapp_user: ubuntu +edxapp_user_createhome: 'yes' +edxapp_user_shell: '/bin/bash' +migrate_db: false +mongo_enable_journal: false +service_variants_enabled: [] +testing_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/testing.txt" diff --git a/docker/build/mongo/Dockerfile b/docker/build/mongo/Dockerfile index b16da26fcfb..520014793a9 100644 --- a/docker/build/mongo/Dockerfile +++ b/docker/build/mongo/Dockerfile @@ -1,5 +1,5 @@ FROM edxops/xenial-common:latest -MAINTAINER edxops +LABEL maintainer="edxops" ADD . /edx/app/edx_ansible/edx_ansible COPY docker/build/mongo/ansible_overrides.yml / diff --git a/docker/build/mysql/Dockerfile b/docker/build/mysql/Dockerfile index 94e1d3cf2e4..435a4d953e7 100644 --- a/docker/build/mysql/Dockerfile +++ b/docker/build/mysql/Dockerfile @@ -1,5 +1,5 @@ FROM edxops/xenial-common:latest -MAINTAINER edxops +LABEL maintainer="edxops" ADD . /edx/app/edx_ansible/edx_ansible COPY docker/build/mysql/ansible_overrides.yml / diff --git a/docker/build/nginx/Dockerfile b/docker/build/nginx/Dockerfile index 3e9953d3e1d..6ac27432e25 100644 --- a/docker/build/nginx/Dockerfile +++ b/docker/build/nginx/Dockerfile @@ -1,5 +1,5 @@ FROM edxops/xenial-common:latest -MAINTAINER edxops +LABEL maintainer="edxops" USER root ADD . /edx/app/edx_ansible/edx_ansible diff --git a/docker/build/notes/Dockerfile b/docker/build/notes/Dockerfile index 4ca7c2d5c67..31c0f7426d9 100644 --- a/docker/build/notes/Dockerfile +++ b/docker/build/notes/Dockerfile @@ -9,7 +9,7 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" ARG OPENEDX_RELEASE=master ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} @@ -21,6 +21,11 @@ ADD . 
/edx/app/edx_ansible/edx_ansible WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays COPY docker/build/notes/ansible_overrides.yml / +COPY docker/build/notes/edx_notes_api.yml /edx/etc/edx_notes_api.yml + + +RUN sudo apt-get update && sudo apt-get -y install python3-dev libmysqlclient-dev + RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook notes.yml \ -c local -i '127.0.0.1,' \ -t 'install,assets,devstack:install' \ @@ -29,4 +34,5 @@ RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook notes.yml \ --extra-vars="COMMON_GIT_PATH=$REPO_OWNER" USER root -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +ENTRYPOINT ["/edx/app/edx_notes_api/devstack.sh"] +CMD ["start"] diff --git a/docker/build/notes/ansible_overrides.yml b/docker/build/notes/ansible_overrides.yml index e431fae5f79..4f0411e71be 100644 --- a/docker/build/notes/ansible_overrides.yml +++ b/docker/build/notes/ansible_overrides.yml @@ -9,3 +9,5 @@ COMMON_MYSQL_MIGRATE_PASS: '{{ EDX_NOTES_API_MYSQL_DB_PASSWORD }}' # default production settings (notesserver.settings.yaml_config). # This is also consistent with all other IDAs. 
EDX_NOTES_API_DJANGO_SETTINGS_MODULE: 'notesserver.settings.devstack' + +devstack: "true" diff --git a/docker/build/notes/edx_notes_api.yml b/docker/build/notes/edx_notes_api.yml new file mode 100644 index 00000000000..9782f5b4c87 --- /dev/null +++ b/docker/build/notes/edx_notes_api.yml @@ -0,0 +1,36 @@ +--- + +ALLOWED_HOSTS: +- localhost +CLIENT_ID: CHANGEME +CLIENT_SECRET: CHANGEME +DATABASES: + default: + ENGINE: django.db.backends.mysql + HOST: db + NAME: edx_notes_api + OPTIONS: + connect_timeout: 10 + PASSWORD: secret + PORT: '3306' + USER: notes001 +DISABLE_TOKEN_CHECK: false +ELASTICSEARCH_INDEX: edx_notes +ELASTICSEARCH_URL: http://es:9200/ +HAYSTACK_CONNECTIONS: + default: + ENGINE: notesserver.highlight.ElasticsearchSearchEngine + INDEX_NAME: edx_notes_api + URL: http://es:9200/ +JWT_AUTH: + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUERS: + - AUDIENCE: SET-ME-PLEASE + ISSUER: http://127.0.0.1:8000/oauth2 + SECRET_KEY: SET-ME-PLEASE + JWT_PUBLIC_SIGNING_JWK_SET: '' +RESULTS_DEFAULT_SIZE: 25 +RESULTS_MAX_SIZE: 250 +SECRET_KEY: CHANGEME +USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME diff --git a/docker/build/notifier/Dockerfile b/docker/build/notifier/Dockerfile index d06791f26de..3bdc54730bb 100644 --- a/docker/build/notifier/Dockerfile +++ b/docker/build/notifier/Dockerfile @@ -1,5 +1,5 @@ FROM edxops/xenial-common:latest -MAINTAINER edxops +LABEL maintainer="edxops" WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays ADD . 
/edx/app/edx_ansible/edx_ansible @@ -9,4 +9,5 @@ RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook notifier.yml \ -t 'install' \ -e@/ansible_overrides.yml WORKDIR /edx/app -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +ENTRYPOINT ["/edx/app/notifier/devstack.sh"] +CMD ["start"] diff --git a/docker/build/notifier/ansible_overrides.yml b/docker/build/notifier/ansible_overrides.yml index f7cd40387ab..997f84517cb 100644 --- a/docker/build/notifier/ansible_overrides.yml +++ b/docker/build/notifier/ansible_overrides.yml @@ -1,3 +1,4 @@ --- DOCKER_TLD: "notifier" +devstack: true diff --git a/docker/build/rabbitmq/Dockerfile b/docker/build/rabbitmq/Dockerfile index e13655bb469..68321b75181 100644 --- a/docker/build/rabbitmq/Dockerfile +++ b/docker/build/rabbitmq/Dockerfile @@ -1,5 +1,5 @@ FROM edxops/xenial-common:latest -MAINTAINER edxops +LABEL maintainer="edxops" ADD . /edx/app/edx_ansible/edx_ansible COPY docker/build/rabbitmq/ansible_overrides.yml / diff --git a/docker/build/registrar/Dockerfile b/docker/build/registrar/Dockerfile new file mode 100644 index 00000000000..dd2efa827ac --- /dev/null +++ b/docker/build/registrar/Dockerfile @@ -0,0 +1,33 @@ +# To build this Dockerfile: +# +# From the root of configuration: +# +# docker build -f docker/build/registrar/Dockerfile . +# +# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible +# with the currently checked-out configuration repo. + +ARG BASE_IMAGE_TAG=latest +FROM edxops/xenial-common:${BASE_IMAGE_TAG} +LABEL maintainer="edxops" +USER root +ENTRYPOINT ["/edx/app/registrar/devstack.sh"] +CMD ["start"] + +ADD . 
/edx/app/edx_ansible/edx_ansible +WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays + +COPY docker/build/registrar/ansible_overrides.yml / +COPY docker/devstack_common_ansible_overrides.yml /devstack/ansible_overrides.yml +COPY docker/build/registrar/registrar.yml /edx/etc/registrar.yml + +ARG OPENEDX_RELEASE=master +ENV OPENEDX_RELEASE=${OPENEDX_RELEASE} +RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook registrar.yml \ + -c local -i "127.0.0.1," \ + -t "install,assets,devstack" \ + --extra-vars="REGISTRAR_VERSION=${OPENEDX_RELEASE}" \ + --extra-vars="@/ansible_overrides.yml" \ + --extra-vars="@/devstack/ansible_overrides.yml" + +EXPOSE 18734 diff --git a/docker/build/registrar/ansible_overrides.yml b/docker/build/registrar/ansible_overrides.yml new file mode 100644 index 00000000000..649d30fd311 --- /dev/null +++ b/docker/build/registrar/ansible_overrides.yml @@ -0,0 +1,15 @@ +--- +COMMON_GIT_PATH: 'edx' + +COMMON_MYSQL_MIGRATE_USER: '{{ REGISTRAR_MYSQL_USER }}' +COMMON_MYSQL_MIGRATE_PASS: '{{ REGISTRAR_MYSQL_PASSWORD }}' + +REGISTRAR_MYSQL_HOST: 'edx.devstack.mysql' +REGISTRAR_DJANGO_SETTINGS_MODULE: 'registrar.settings.devstack' +REGISTRAR_GUNICORN_EXTRA: '--reload' +REGISTRAR_MEMCACHE: ['edx.devstack.memcached:11211'] +REGISTRAR_EXTRA_APPS: [] + +REGISTRAR_SECRET_KEY: 'hBiEM5pDr8GsZv1lh6GKmD0c9SF5Z00TFEoRY1zSmCxijFrR' + +edx_django_service_is_devstack: true diff --git a/docker/build/registrar/registrar.yml b/docker/build/registrar/registrar.yml new file mode 100644 index 00000000000..324d1322fb1 --- /dev/null +++ b/docker/build/registrar/registrar.yml @@ -0,0 +1,72 @@ +--- + +API_ROOT: http://localhost:18734/api +BACKEND_SERVICE_EDX_OAUTH2_KEY: registrar-backend-service-key +BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://localhost:18000/oauth2 +BACKEND_SERVICE_EDX_OAUTH2_SECRET: registrar-backend-service-secret +CACHES: + default: + BACKEND: django.core.cache.backends.memcached.MemcachedCache + KEY_PREFIX: registrar + LOCATION: + - 
edx.devstack.memcached:11211 +CELERY_ALWAYS_EAGER: false +CELERY_BROKER_HOSTNAME: '' +CELERY_BROKER_PASSWORD: '' +CELERY_BROKER_TRANSPORT: '' +CELERY_BROKER_USER: '' +CELERY_BROKER_VHOST: '' +CELERY_DEFAULT_EXCHANGE: registrar +CELERY_DEFAULT_QUEUE: registrar.default +CELERY_DEFAULT_ROUTING_KEY: registrar +CERTIFICATE_LANGUAGES: + en: English + es_419: Spanish +CORS_ORIGIN_WHITELIST: [] +CSRF_COOKIE_SECURE: false +CSRF_TRUSTED_ORIGINS: [] +DATABASES: + default: + ATOMIC_REQUESTS: false + CONN_MAX_AGE: 60 + ENGINE: django.db.backends.mysql + HOST: edx.devstack.mysql + NAME: registrar + OPTIONS: + connect_timeout: 10 + init_command: SET sql_mode='STRICT_TRANS_TABLES' + PASSWORD: password + PORT: '3306' + USER: registrar001 +DISCOVERY_BASE_URL: null +EDX_DRF_EXTENSIONS: + OAUTH2_USER_INFO_URL: http://edx.devstack.lms:18000/oauth2/user_info +EXTRA_APPS: [] +JWT_AUTH: + JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload + JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature + JWT_ISSUERS: + - AUDIENCE: lms-key + ISSUER: http://localhost:18000/oauth2 + SECRET_KEY: lms-secret + JWT_PUBLIC_SIGNING_JWK_SET: '' +LANGUAGE_CODE: en +LANGUAGE_COOKIE_NAME: openedx-language-preference +LMS_BASE_URL: null +MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage + MEDIA_ROOT: /edx/var/registrar/media + MEDIA_URL: /api/media/ +REGISTRAR_SERVICE_USER: registrar_service_user +SECRET_KEY: hBiEM5pDr8GsZv1lh6GKmD0c9SF5Z00TFEoRY1zSmCxijFrR +SEGMENT_KEY: null +SESSION_EXPIRE_AT_BROWSER_CLOSE: false +SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 +SOCIAL_AUTH_EDX_OAUTH2_KEY: registrar-sso-key +SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout +SOCIAL_AUTH_EDX_OAUTH2_SECRET: registrar-sso-secret +SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 +SOCIAL_AUTH_REDIRECT_IS_HTTPS: false +STATICFILES_STORAGE: django.contrib.staticfiles.storage.StaticFilesStorage +STATIC_ROOT: /edx/var/registrar/staticfiles +TIME_ZONE: UTC diff 
--git a/docker/build/tools_jenkins/Dockerfile b/docker/build/tools_jenkins/Dockerfile index d32f86e884b..a5726c16d01 100644 --- a/docker/build/tools_jenkins/Dockerfile +++ b/docker/build/tools_jenkins/Dockerfile @@ -1,5 +1,5 @@ FROM edxops/trusty-common:latest -MAINTAINER edxops +LABEL maintainer="edxops" USER root RUN apt-get update @@ -7,6 +7,6 @@ RUN apt-get update ADD . /edx/app/edx_ansible/edx_ansible WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays COPY docker/build/tools_jenkins/ansible_overrides.yml / -RUN PYTHONUNBUFFERED=1 /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook -v jenkins_tools.yml -i '127.0.0.1,' -c local -e@/ansible_overrides.yml -vv -t 'install' +RUN PYTHONUNBUFFERED=1 /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook -v jenkins_tools.yml -i '127.0.0.1,' -c local -e@/ansible_overrides.yml -vv CMD /bin/su -l jenkins --shell=/bin/bash -c "/usr/bin/daemon -f --name=jenkins --inherit --env=JENKINS_HOME=/edx/var/jenkins --output=/var/log/jenkins/jenkins.log --pidfile=/var/run/jenkins/jenkins.pid -- /usr/bin/java -jar /usr/share/jenkins/jenkins.war --webroot=/var/cache/jenkins/war --httpPort=8080 --ajp13Port=-1" diff --git a/docker/build/trusty-common/Dockerfile b/docker/build/trusty-common/Dockerfile index e30ba887198..dd2207bb994 100644 --- a/docker/build/trusty-common/Dockerfile +++ b/docker/build/trusty-common/Dockerfile @@ -1,5 +1,5 @@ FROM ubuntu:trusty -MAINTAINER edxops +LABEL maintainer="edxops" ENV ANSIBLE_REPO="https://github.com/edx/ansible" ENV CONFIGURATION_REPO="https://github.com/edx/configuration.git" ENV CONFIGURATION_VERSION="master" diff --git a/docker/build/xenial-common/Dockerfile b/docker/build/xenial-common/Dockerfile index fcea375dbb4..8e5a1cbee3b 100644 --- a/docker/build/xenial-common/Dockerfile +++ b/docker/build/xenial-common/Dockerfile @@ -1,13 +1,15 @@ +ARG BASE_IMAGE_TAG=latest FROM ubuntu:xenial -MAINTAINER edxops +LABEL maintainer="edxops" # Set locale to UTF-8 which is not the default for 
docker. # See the links for details: # http://jaredmarkell.com/docker-and-locales/ # https://github.com/docker-library/python/issues/13 # https://github.com/docker-library/python/pull/14/files +# Also install software-properties-common to get apt-add-repository RUN apt-get update &&\ - apt-get install -y locales &&\ + apt-get install -y locales software-properties-common &&\ locale-gen en_US.UTF-8 ENV LANG en_US.UTF-8 ENV LANGUAGE en_US:en @@ -18,6 +20,11 @@ ENV CONFIGURATION_REPO="https://github.com/edx/configuration.git" ARG OPENEDX_RELEASE=master ENV CONFIGURATION_VERSION="${OPENEDX_RELEASE}" +# Add the deadsnakes PPA to install Python 3.8 +RUN apt-add-repository -y ppa:deadsnakes/ppa +RUN apt-get update &&\ + apt-get install -y python3.8-dev python3.8-distutils + ADD util/install/ansible-bootstrap.sh /tmp/ansible-bootstrap.sh RUN chmod +x /tmp/ansible-bootstrap.sh RUN /tmp/ansible-bootstrap.sh diff --git a/docker/build/xqueue/Dockerfile b/docker/build/xqueue/Dockerfile index d4f71d73c43..1b78e553f98 100644 --- a/docker/build/xqueue/Dockerfile +++ b/docker/build/xqueue/Dockerfile @@ -9,8 +9,9 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +LABEL maintainer="edxops" +ENTRYPOINT ["/edx/app/xqueue/devstack.sh"] +CMD ["start"] USER root RUN apt-get update @@ -24,7 +25,7 @@ RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook xqueue.yml \ -i '127.0.0.1,' \ -c local \ -t "install:base,install:system-requirements,install:configuration,install:app-requirements,install:code,devstack" \ - --extra-vars="xqueue_version=${OPENEDX_RELEASE}" \ + --extra-vars="XQUEUE_VERSION=${OPENEDX_RELEASE}" \ --extra-vars="@/ansible_overrides.yml" EXPOSE 18040 diff --git a/docker/build/xqwatcher/Dockerfile b/docker/build/xqwatcher/Dockerfile index 56d305213d1..8a52ba1553d 100644 --- 
a/docker/build/xqwatcher/Dockerfile +++ b/docker/build/xqwatcher/Dockerfile @@ -9,7 +9,7 @@ ARG BASE_IMAGE_TAG=latest FROM edxops/xenial-common:${BASE_IMAGE_TAG} -MAINTAINER edxops +LABEL maintainer="edxops" ADD . /edx/app/edx_ansible/edx_ansible COPY docker/build/xqwatcher/ansible_overrides.yml / @@ -23,4 +23,5 @@ RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook xqwatcher.yml \ --extra-vars="XQWATCHER_VERSION=${OPENEDX_RELEASE}" \ --extra-vars="@/ansible_overrides.yml" WORKDIR /edx/app -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +ENTRYPOINT ["/edx/app/edxapp/devstack.sh"] +CMD ["start"] diff --git a/docker/build/devstack/ansible_overrides.yml b/docker/devstack_common_ansible_overrides.yml similarity index 80% rename from docker/build/devstack/ansible_overrides.yml rename to docker/devstack_common_ansible_overrides.yml index 1bd02a94b02..3dee7815726 100644 --- a/docker/build/devstack/ansible_overrides.yml +++ b/docker/devstack_common_ansible_overrides.yml @@ -1,3 +1,6 @@ + +# These variables are loaded into most devstack images via their Dockerfile + EDXAPP_LMS_BASE: 'edx.devstack.lms:18000' EDXAPP_LMS_ROOT_URL: 'http://{{ EDXAPP_LMS_BASE }}' EDXAPP_LMS_PUBLIC_ROOT_URL: 'http://localhost:18000' diff --git a/docker/plays/analytics_api.yml b/docker/plays/analytics_api.yml index 2b1e13ae326..25565cb0d46 100644 --- a/docker/plays/analytics_api.yml +++ b/docker/plays/analytics_api.yml @@ -6,6 +6,5 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - common_vars - - docker + - common - analytics_api diff --git a/docker/plays/automated.yml b/docker/plays/automated.yml index ad3fc5e2d0b..d04a2644d78 100644 --- a/docker/plays/automated.yml +++ b/docker/plays/automated.yml @@ -3,6 +3,5 @@ become: True gather_facts: True roles: - - common_vars - - docker + - common - automated diff --git a/docker/plays/designer.yml b/docker/plays/designer.yml new file mode 100644 index 
00000000000..516cebce9e3 --- /dev/null +++ b/docker/plays/designer.yml @@ -0,0 +1,12 @@ +- name: Deploy designer + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: nginx + nginx_default_sites: + - designer + - designer diff --git a/docker/plays/devpi.yml b/docker/plays/devpi.yml index a58a517f696..f05210ec798 100644 --- a/docker/plays/devpi.yml +++ b/docker/plays/devpi.yml @@ -7,6 +7,5 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - common_vars - - docker + - common - devpi diff --git a/docker/plays/docker-tools.yml b/docker/plays/docker-tools.yml index 42f08aedf32..0dfc6dd0839 100644 --- a/docker/plays/docker-tools.yml +++ b/docker/plays/docker-tools.yml @@ -3,5 +3,5 @@ become: True gather_facts: True roles: - - docker + - common - docker-tools diff --git a/docker/plays/ecommerce.yml b/docker/plays/ecommerce.yml index 4d8a9a0dc4c..5e6aa5b839d 100644 --- a/docker/plays/ecommerce.yml +++ b/docker/plays/ecommerce.yml @@ -9,4 +9,7 @@ - role: nginx nginx_default_sites: - ecommerce + - devpi_consumer - ecommerce + - sqlite_fix + - browsers diff --git a/docker/plays/ecomworker.yml b/docker/plays/ecomworker.yml index 2fa4ba8e4b4..63e996af130 100644 --- a/docker/plays/ecomworker.yml +++ b/docker/plays/ecomworker.yml @@ -6,6 +6,5 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - common_vars - - docker + - common - ecomworker diff --git a/docker/plays/edxapp.yml b/docker/plays/edxapp.yml index cbf46f61931..d6199572486 100644 --- a/docker/plays/edxapp.yml +++ b/docker/plays/edxapp.yml @@ -6,8 +6,7 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - common_vars - - docker + - common - devpi_consumer - role: nginx nginx_sites: @@ -17,5 +16,4 @@ - lms nginx_extra_sites: "{{ NGINX_EDXAPP_EXTRA_SITES }}" nginx_extra_configs: "{{ NGINX_EDXAPP_EXTRA_CONFIGS }}" - nginx_redirects: "{{ NGINX_EDXAPP_CUSTOM_REDIRECTS }}" - edxapp diff --git a/docker/plays/elasticsearch.yml 
b/docker/plays/elasticsearch.yml index 0af9b0a7c1e..3783014eb0a 100644 --- a/docker/plays/elasticsearch.yml +++ b/docker/plays/elasticsearch.yml @@ -2,6 +2,5 @@ become: True roles: - common - - docker - oraclejdk - elasticsearch diff --git a/docker/plays/enterprise_catalog.yml b/docker/plays/enterprise_catalog.yml new file mode 100644 index 00000000000..ba72826b0a4 --- /dev/null +++ b/docker/plays/enterprise_catalog.yml @@ -0,0 +1,12 @@ +- name: Deploy enterprise catalog + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: nginx + nginx_default_sites: + - enterprise_catalog + - enterprise_catalog diff --git a/docker/plays/forum.yml b/docker/plays/forum.yml index f620e20c664..4cfd9e0c479 100644 --- a/docker/plays/forum.yml +++ b/docker/plays/forum.yml @@ -6,6 +6,5 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - common_vars - - docker + - common - forum diff --git a/docker/plays/go-agent.yml b/docker/plays/go-agent.yml index 2e4195318fb..ca9742f6e49 100644 --- a/docker/plays/go-agent.yml +++ b/docker/plays/go-agent.yml @@ -5,6 +5,7 @@ become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - supervisor - go-agent diff --git a/docker/plays/go-server.yml b/docker/plays/go-server.yml index ebdf589c6e7..7acba2caf69 100644 --- a/docker/plays/go-server.yml +++ b/docker/plays/go-server.yml @@ -5,6 +5,7 @@ become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - supervisor - go-server diff --git a/docker/plays/insights.yml b/docker/plays/insights.yml index ee0429aafaf..e31939a774a 100644 --- a/docker/plays/insights.yml +++ b/docker/plays/insights.yml @@ -6,5 +6,5 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - docker + - common - insights diff --git a/docker/plays/jenkins_analytics.yml b/docker/plays/jenkins_analytics.yml index 019e849d478..153db2575c8 100644 --- a/docker/plays/jenkins_analytics.yml +++ 
b/docker/plays/jenkins_analytics.yml @@ -6,6 +6,5 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - common_vars - - docker + - common - jenkins_analytics diff --git a/docker/plays/jenkins_build.yml b/docker/plays/jenkins_build.yml index cea9349fe7b..25c9a6556a0 100644 --- a/docker/plays/jenkins_build.yml +++ b/docker/plays/jenkins_build.yml @@ -6,6 +6,5 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - common_vars - - docker + - common - jenkins_build diff --git a/docker/plays/jenkins_tools.yml b/docker/plays/jenkins_tools.yml index 6d1aedb8f02..7cab6ae0c58 100644 --- a/docker/plays/jenkins_tools.yml +++ b/docker/plays/jenkins_tools.yml @@ -6,6 +6,5 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - common_vars - - docker + - common - tools_jenkins diff --git a/docker/plays/mongo.yml b/docker/plays/mongo.yml index 7ad91019cc2..25a5268db8a 100644 --- a/docker/plays/mongo.yml +++ b/docker/plays/mongo.yml @@ -3,6 +3,5 @@ become: True gather_facts: True roles: - - common_vars - - docker + - common - mongo_3_2 diff --git a/docker/plays/mysql.yml b/docker/plays/mysql.yml index e493b572bea..ddcdbe720d0 100644 --- a/docker/plays/mysql.yml +++ b/docker/plays/mysql.yml @@ -3,6 +3,5 @@ become: True gather_facts: True roles: - - common_vars - - docker + - common - mysql diff --git a/docker/plays/nginx.yml b/docker/plays/nginx.yml index 3a8e140a7f6..96ab6437888 100644 --- a/docker/plays/nginx.yml +++ b/docker/plays/nginx.yml @@ -6,8 +6,7 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - common_vars - - docker + - common - role: nginx nginx_sites: - lms @@ -19,4 +18,3 @@ - lms nginx_extra_sites: "{{ NGINX_EDXAPP_EXTRA_SITES }}" nginx_extra_configs: "{{ NGINX_EDXAPP_EXTRA_CONFIGS }}" - nginx_redirects: "{{ NGINX_EDXAPP_CUSTOM_REDIRECTS }}" diff --git a/docker/plays/notifier.yml b/docker/plays/notifier.yml index 912e1e686ca..872b6eef58e 100644 --- a/docker/plays/notifier.yml +++ b/docker/plays/notifier.yml @@ -6,6 +6,5 @@ serial_count: 1 serial: 
"{{ serial_count }}" roles: - - common_vars - - docker + - common - notifier diff --git a/docker/plays/rabbitmq.yml b/docker/plays/rabbitmq.yml index ffd760ebc88..458048651f6 100644 --- a/docker/plays/rabbitmq.yml +++ b/docker/plays/rabbitmq.yml @@ -6,6 +6,5 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - common_vars - - docker + - common - rabbitmq diff --git a/docker/plays/registrar.yml b/docker/plays/registrar.yml new file mode 100644 index 00000000000..96ca892b2f3 --- /dev/null +++ b/docker/plays/registrar.yml @@ -0,0 +1,12 @@ +- name: Deploy registrar + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - role: nginx + nginx_default_sites: + - registrar + - registrar diff --git a/docker/plays/xqueue.yml b/docker/plays/xqueue.yml index 2d08c8b1bec..8003b22f456 100644 --- a/docker/plays/xqueue.yml +++ b/docker/plays/xqueue.yml @@ -3,6 +3,5 @@ become: True gather_facts: True roles: - - common_vars - - docker + - common - xqueue diff --git a/docker/plays/xqwatcher.yml b/docker/plays/xqwatcher.yml index 67d6bda4b08..2e9f73a3039 100644 --- a/docker/plays/xqwatcher.yml +++ b/docker/plays/xqwatcher.yml @@ -3,5 +3,5 @@ become: True gather_facts: True roles: - - docker + - common - xqwatcher diff --git a/documentation/0001-ansible-code-conventions.rst b/documentation/0001-ansible-code-conventions.rst new file mode 100644 index 00000000000..8f1ba4d1f1d --- /dev/null +++ b/documentation/0001-ansible-code-conventions.rst @@ -0,0 +1,143 @@ +======================== +Ansible Code Conventions +======================== + +General Conventions +=================== +**Spacing** +* YAML files - All yaml files should use 2 space indents and end with .yml +* Use spaces around jinja variable names. {{ var }} not {{var}} + +**Variables** +* Variables - Use jinja variable syntax over deprecated variable syntax. 
{{ var }} not $var +* Variables that are environment specific and that need to be overridden should be in ALL CAPS. +* Variables that are internal to the role should be lowercase. +* Prefix all variables defined in a role with the name of the role. Example: EDXAPP_FOO + +**Roles/Plays/Playbooks** +* Keep roles self contained - Roles should avoid including tasks from other roles when possible +* Plays should do nothing more than include a list of roles except where pre_tasks and post_tasks are required (to manage a load balancer for example) +* Plays/Playbooks that apply to the general community should be copied to configuration/playbooks + +**ETC** +* Handlers - Do not use handlers. If you need to restart an app when specific tasks run, just add a task to do so at the end of the playbook. If necessary, it can be skipped with tags (see `Role Life-cycle Tags`_) +* Separators - Use underscores (e.g. my_role) not dashes (my-role). +* Paths - When defining paths, do not include trailing slashes (e.g. my_path: /foo not my_path: /foo/. When concatenating paths, follow the same convention (e.g. {{ my_path }}/bar not {{ my_path }}bar) + +.. _Role Life-cycle Tags: https://openedx.atlassian.net/wiki/spaces/OpenOPS/pages/39584735/Role+Life-cycle+Tags + + +Conditionals and Return Status +============================== + +Always use ``when:`` for conditionals + +.. code-block:: bash + + when: my_var is defined + when: my_var is not defined + +To verify return status (see `ansible docs conditionals`_) + +.. code-block:: yaml + + - command: /bin/false + register: my_result + ignore_errors: True + - debug: msg="task failed" + when: my_result|failed + + +.. _ansible docs conditionals: http://docs.ansible.com/playbooks_conditionals.html + +Formatting +========== + +Use yaml-style blocks. + +Good: + +.. code-block:: yaml + + - file: + dest: "{{ test }}" + src: "./foo.txt" + mode: 0770 + state: present + user: "root" + group: "wheel" + +Bad: + +.. 
code-block:: yaml + + - file: > + dest={{ test }} src=./foo.txt mode=0770 + state=present user=root group=wheel + +Break long lines using yaml line continuation. `Reference`_ + +.. code-block:: yaml + + - shell: > + python a very long command --with=very --long-options=foo + --and-even=more_options --like-these + + +.. _Reference: http://docs.ansible.com/playbooks_intro.html + +Roles +===== + +**Role Variables** + +- ``common`` role - Contains tasks that apply to all roles. +- ``common_vars`` role - Contains vars that apply to all roles. +- *Roles variables* - Variables specific to a role should be defined in /vars/main.yml. All variables should be prefixed with the role name. +- *Role defaults* - Default variables should configure a role to install edx in such away that all services can run on a single server +- Variables that are environment specific and that need to be overridden should be in all caps. +Every role should have a standard set of role directories, example that includes a python and ruby virtualenv: + +.. code-block:: yaml + + edxapp_data_dir: "{{ COMMON_DATA_DIR }}/edxapp" + edxapp_app_dir: "{{ COMMON_APP_DIR }}/edxapp" + edxapp_log_dir: "{{ COMMON_LOG_DIR }}/edxapp" + edxapp_venvs_dir: "{{ edxapp_app_dir }}/venvs" + edxapp_venv_dir: "{{ edxapp_venvs_dir }}/edxapp" + edxapp_venv_bin: "{{ edxapp_venv_dir }}/bin" + edxapp_rbenv_dir: "{{ edxapp_app_dir }}" + edxapp_rbenv_root: "{{ edxapp_rbenv_dir }}/.rbenv" + edxapp_rbenv_shims: "{{ edxapp_rbenv_root }}/shims" + edxapp_rbenv_bin: "{{ edxapp_rbenv_root }}/bin" + edxapp_gem_root: "{{ edxapp_rbenv_dir }}/.gem" + edxapp_gem_bin: "{{ edxapp_gem_root }}/bin" + + +**Role Naming Conventions** + +- *Role names* - Terse, one word if possible, use underscores if necessary. +- *Role task names* - Terse, descriptive, spaces are OK and should be prefixed with the role name. + +Secure vs. 
Insecure data +======================== + +As a general policy we want to protect the following data: + +- Usernames +- Public keys (keys are OK to be public, but can be used to figure out usernames) +- Hostnames +- Passwords, API keys + +Directory structure for the secure repository: + +.. code-block:: text + + ansible + ├── files + ├── keys + └── vars + + + +Secure vars are set in files under the ``ansible/vars`` directory. These files will be passed in when the relevant ansible-playbook commands are run. If you need a secure variable defined, give it a name and use it in your playbooks like any other variable. The value should be set in the secure vars files of the relevant deployment (edx, edge, etc.). If you don't have access to this repository, you'll need to submit a ticket to the devops team to make the secure change. diff --git a/documentation/decisions/0000-placeholder.rst b/documentation/decisions/0000-placeholder.rst new file mode 100644 index 00000000000..e69de29bb2d diff --git a/openedx.yaml b/openedx.yaml index e1a8de5c12e..d86da55f844 100644 --- a/openedx.yaml +++ b/openedx.yaml @@ -2,7 +2,13 @@ # http://open-edx-proposals.readthedocs.io/en/latest/oeps/oep-0002.html#specification nick: conf -oeps: {} openedx-release: {ref: master} -owner: edx/devops -track-pulls: true +owner: fredsmith + +supporting_teams: + - edx/devops + +oeps: + oep-2: true + oep-7: true + oep-18: true diff --git a/playbooks/active_instances_in_asg.py b/playbooks/active_instances_in_asg.py index 493ee945334..14ea5661e50 100755 --- a/playbooks/active_instances_in_asg.py +++ b/playbooks/active_instances_in_asg.py @@ -2,7 +2,7 @@ """ Build an ansible inventory list suitable for use by -i by finding the active -Auto Scaling Group in an Elastic Load Balancer. +Auto Scaling Group in an Elastic Load Balancer. If multiple ASGs are active in the ELB, no inventory is returned. 
@@ -20,6 +20,7 @@ """ from __future__ import print_function +from __future__ import absolute_import import argparse import botocore.session import botocore.exceptions @@ -27,6 +28,7 @@ from collections import defaultdict from os import environ from itertools import chain +import random class ActiveInventory(): @@ -45,13 +47,24 @@ def run(self,asg_name): asg_iterator = asg_paginator.paginate() matching_groups = [] for groups in asg_iterator: - for g in groups['AutoScalingGroups']: - for t in g['Tags']: - if t['Key'] == 'Name' and t['Value'] == asg_name: - matching_groups.append(g) + for asg in groups['AutoScalingGroups']: + asg_inactive = len(asg['SuspendedProcesses']) > 0 + if asg_inactive: + continue + for tag in asg['Tags']: + if tag['Key'] == 'Name' and tag['Value'] == asg_name: + matching_groups.append(asg) + + groups_to_instances = defaultdict(list) + instances_to_groups = {} + + # for all instances in all auto scaling groups + for group in matching_groups: + for instance in group['Instances']: + if instance['LifecycleState'] == 'InService': + groups_to_instances[group['AutoScalingGroupName']].append(instance['InstanceId']) + instances_to_groups[instance['InstanceId']] = group['AutoScalingGroupName'] - groups_to_instances = {group['AutoScalingGroupName']: [instance['InstanceId'] for instance in group['Instances']] for group in matching_groups} - instances_to_groups = {instance['InstanceId']: group['AutoScalingGroupName'] for group in matching_groups for instance in group['Instances'] } # We only need to check for ASGs in an ELB if we have more than 1. # If a cluster is running with an ASG out of the ELB, then there are larger problems. 
@@ -63,7 +76,7 @@ def run(self,asg_name): instances = elb.describe_instance_health(LoadBalancerName=load_balancer_name) active_instances = [instance['InstanceId'] for instance in instances['InstanceStates'] if instance['State'] == 'InService'] for instance_id in active_instances: - active_groups[instances_to_groups[instance_id]] = 1 + active_groups[instances_to_groups[instance_id]] = 1 # If we found no active groups, because there are no ELBs (edxapp workers normally) elbs = list(chain.from_iterable([group['LoadBalancerNames'] for group in matching_groups])) @@ -85,7 +98,7 @@ def run(self,asg_name): for group in active_groups.keys(): for group_instance in groups_to_instances[group]: - instance = ec2.describe_instances(InstanceIds=[group_instance])['Reservations'][0]['Instances'][0] + instance = random.choice(ec2.describe_instances(InstanceIds=[group_instance])['Reservations'][0]['Instances']) if 'PrivateIpAddress' in instance: print("{},".format(instance['PrivateIpAddress'])) return # We only want a single IP diff --git a/playbooks/aide.yml b/playbooks/aide.yml index df3b0496b4c..36f5ab46fa3 100644 --- a/playbooks/aide.yml +++ b/playbooks/aide.yml @@ -6,7 +6,8 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - aide - role: datadog when: COMMON_ENABLE_DATADOG diff --git a/playbooks/alton.yml b/playbooks/alton.yml index 3ff4dcbdcc4..9865706a34c 100644 --- a/playbooks/alton.yml +++ b/playbooks/alton.yml @@ -8,5 +8,6 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - alton diff --git a/playbooks/analytics_single.yml b/playbooks/analytics_single.yml index e0c7af171db..fbe2bbbda3e 100644 --- a/playbooks/analytics_single.yml +++ b/playbooks/analytics_single.yml @@ -1,3 +1,14 @@ +--- + +# Open edX Native installation for single server analytics installs. 
+ +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python + - name: Deploy all analytics services to a single node hosts: all become: True @@ -7,14 +18,32 @@ disable_edx_services: false ENABLE_DATADOG: False ENABLE_NEWRELIC: False + SANDBOX_ENABLE_ANALYTICS_API: True + SANDBOX_ENABLE_ANALYTICS_PIPELINE: True + SANDBOX_ENABLE_INSIGHTS: True + EDXAPP_MYSQL_HOST: '' + EDXAPP_MEMCACHE: '' + POSTFIX_QUEUE_EXTERNAL_SMTP_HOST: '' roles: - - aws - - mysql - - edxlocal - - memcache - - analytics_api - - analytics_pipeline + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: edxlocal + when: EDXAPP_MYSQL_HOST == 'localhost' + - role: memcache + when: "'localhost' in ' '.join(EDXAPP_MEMCACHE)" + - role: nginx + nginx_sites: + - analytics_api + when: SANDBOX_ENABLE_ANALYTICS_API + - role: analytics_api + when: SANDBOX_ENABLE_ANALYTICS_API + - role: analytics_pipeline + when: SANDBOX_ENABLE_ANALYTICS_PIPELINE - role: nginx nginx_sites: - insights - - insights + when: SANDBOX_ENABLE_INSIGHTS + - role: insights + when: SANDBOX_ENABLE_INSIGHTS + - role: postfix_queue + when: POSTFIX_QUEUE_EXTERNAL_SMTP_HOST != '' diff --git a/playbooks/analyticsapi.yml b/playbooks/analyticsapi.yml index 3fe4e7677fb..5551301b698 100644 --- a/playbooks/analyticsapi.yml +++ b/playbooks/analyticsapi.yml @@ -7,7 +7,8 @@ ENABLE_NEWRELIC: False CLUSTER_NAME: 'analytics-api' roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_default_sites: - analytics_api @@ -20,3 +21,6 @@ when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE - role: datadog-uninstall when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'analytics_api' + when: ANALYTICS_API_HERMES_ENABLED diff --git a/playbooks/android_sdk.yml b/playbooks/android_sdk.yml new file mode 100644 index 00000000000..54a79f45095 --- /dev/null +++ b/playbooks/android_sdk.yml @@ -0,0 +1,10 @@ +# Configure a system to compile, test and sign the Android client. 
Use this +# play to configure local testing environments. If you need to configure a +# Jenkins worker for Android tasks, use playbooks/jenkins_worker_android.yml +--- +- name: Configure a system for building and testing the edX Android app + hosts: localhost + become: True + gather_facts: True + roles: + - android_sdk diff --git a/playbooks/antivirus.yml b/playbooks/antivirus.yml index 8af40faa125..dc7c9335607 100644 --- a/playbooks/antivirus.yml +++ b/playbooks/antivirus.yml @@ -3,7 +3,8 @@ become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - antivirus - role: datadog when: COMMON_ENABLE_DATADOG diff --git a/playbooks/aws.yml b/playbooks/aws.yml index b913211e54b..6cfd5ce7f39 100644 --- a/playbooks/aws.yml +++ b/playbooks/aws.yml @@ -7,4 +7,5 @@ serial: "{{ serial_count }}" roles: - common - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE diff --git a/playbooks/blockstore.yml b/playbooks/blockstore.yml new file mode 100644 index 00000000000..eb33aab349b --- /dev/null +++ b/playbooks/blockstore.yml @@ -0,0 +1,21 @@ +- name: Deploy blockstore + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: True + CLUSTER_NAME: 'blockstore' + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_default_sites: + - blockstore + - blockstore + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: hermes + HERMES_TARGET_SERVICE: 'blockstore' + when: BLOCKSTORE_HERMES_ENABLED diff --git a/playbooks/bootstrap_python.yml b/playbooks/bootstrap_python.yml index 85000756a6c..cd4b5df2395 100644 --- a/playbooks/bootstrap_python.yml +++ b/playbooks/bootstrap_python.yml @@ -1,7 +1,7 @@ --- # Runs the python bootstratpping role against an ubuntu machine # This is not as complete as ansible_bootstrap.sh (intentionally so) -# This lets you get pythong2.7 installed on a machine so you can followup +# This lets 
you get python2.7 installed on a machine so you can followup # with your actual playbook or role. The key is gather_facts: False. # # Usage: diff --git a/playbooks/callback_plugins/sqs.py b/playbooks/callback_plugins/sqs.py index 879b94c2fc6..980d5f1b0d3 100644 --- a/playbooks/callback_plugins/sqs.py +++ b/playbooks/callback_plugins/sqs.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# From https://github.com/ansible/ansible/issues/31527#issuecomment-335495855 +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import os import sys @@ -63,19 +67,19 @@ def __init__(self): self.start_time = time.time() if not 'SQS_REGION' in os.environ: - print 'ANSIBLE_ENABLE_SQS enabled but SQS_REGION ' \ - 'not defined in environment' + print('ANSIBLE_ENABLE_SQS enabled but SQS_REGION ' \ + 'not defined in environment') sys.exit(1) self.region = os.environ['SQS_REGION'] try: self.sqs = boto.sqs.connect_to_region(self.region) except NoAuthHandlerFound: - print 'ANSIBLE_ENABLE_SQS enabled but cannot connect ' \ - 'to AWS due invalid credentials' + print('ANSIBLE_ENABLE_SQS enabled but cannot connect ' \ + 'to AWS due invalid credentials') sys.exit(1) if not 'SQS_NAME' in os.environ: - print 'ANSIBLE_ENABLE_SQS enabled but SQS_NAME not ' \ - 'defined in environment' + print('ANSIBLE_ENABLE_SQS enabled but SQS_NAME not ' \ + 'defined in environment') sys.exit(1) self.name = os.environ['SQS_NAME'] self.queue = self.sqs.create_queue(self.name) @@ -149,7 +153,7 @@ def _send_queue_message(self, msg, msg_type): self.sqs.send_message(self.queue, json.dumps(payload)) break except socket.gaierror as e: - print 'socket.gaierror will retry: ' + e + print('socket.gaierror will retry: ' + e) time.sleep(1) except Exception as e: raise e diff --git a/playbooks/callback_plugins/task_timing.py b/playbooks/callback_plugins/task_timing.py deleted file mode 100644 index 
1df586195e2..00000000000 --- a/playbooks/callback_plugins/task_timing.py +++ /dev/null @@ -1,287 +0,0 @@ -import collections -from datetime import datetime, timedelta -import json -import logging -import os -from os.path import splitext, basename, exists, dirname -import sys -import time - -try: - from ansible.plugins.callback import CallbackBase -except ImportError: - # Support Ansible 1.9.x - CallbackBase = object - -import datadog - -logging.basicConfig(level=logging.INFO, stream=sys.stdout) -logging.getLogger("requests").setLevel(logging.WARNING) -logging.getLogger("dd").setLevel(logging.WARNING) -LOGGER = logging.getLogger(__name__) - -""" -Originally written by 'Jharrod LaFon' -#https://github.com/jlafon/ansible-profile/blob/master/callback_plugins/profile_tasks.py - -""" - -ANSIBLE_TIMER_LOG = os.environ.get('ANSIBLE_TIMER_LOG') - - -class Timestamp(object): - """ - A class for capturing start, end and duration for an action. - """ - def __init__(self): - self.start = datetime.utcnow() - self.end = None - - def stop(self): - """ - Record the end time of the timed period. - """ - self.end = datetime.utcnow() - - @property - def duration(self): - """ - Return the duration that this Timestamp covers. - """ - return self.end - self.start - - -# This class only has a single method (which would ordinarily make it a -# candidate to be turned into a function). However, the TimingLoggers are -# instanciated once when ansible starts up, and then called for every play. -class TimingLogger(object): - """ - Base-class for logging timing about ansible tasks and plays. - """ - def log_play(self, playbook_name, playbook_timestamp, results): - """ - Record the timing results of an ansible play. - - Arguments: - playbook_name: the name of the playbook being logged. - playbook_timestamp (Timestamp): the timestamps measuring how - long the play took. - results (dict(string -> Timestamp)): a dict mapping task names - to Timestamps that measure how long each task took. 
- """ - pass - - -class DatadogTimingLogger(TimingLogger): - """ - Record ansible task and play timing to Datadog. - - Requires that the environment variable DATADOG_API_KEY be set in order - to log any data. - """ - def __init__(self): - super(DatadogTimingLogger, self).__init__() - - self.datadog_api_key = os.getenv('DATADOG_API_KEY') - self.datadog_api_initialized = False - - if self.datadog_api_key: - datadog.initialize( - api_key=self.datadog_api_key, - app_key=None - ) - self.datadog_api_initialized = True - - def clean_tag_value(self, value): - """ - Remove any characters that aren't allowed in Datadog tags. - - Arguments: - value: the string to be cleaned. - """ - return value.replace(" | ", ".").replace(" ", "-").lower() - - def log_play(self, playbook_name, playbook_timestamp, results): - if not self.datadog_api_initialized: - return - - datadog_tasks_metrics = [] - for name, timestamp in results.items(): - datadog_tasks_metrics.append({ - 'metric': 'edx.ansible.task_duration', - 'date_happened': time.mktime(timestamp.start.timetuple()), - 'points': timestamp.duration.total_seconds(), - 'tags': [ - 'task:{0}'.format(self.clean_tag_value(name)), - 'playbook:{0}'.format(self.clean_tag_value(playbook_name)) - ] - }) - try: - datadog.api.Metric.send(datadog_tasks_metrics) - datadog.api.Metric.send( - metric="edx.ansible.playbook_duration", - date_happened=time.mktime(playbook_timestamp.start.timetuple()), - points=playbook_timestamp.duration.total_seconds(), - tags=["playbook:{0}".format(self.clean_tag_value(playbook_name))] - ) - except Exception: - LOGGER.exception("Failed to log timing data to datadog") - - -class JsonTimingLogger(TimingLogger): - """ - Record task and play timing to a local file in a JSON format. - - Requires that the environment variable ANSIBLE_TIMER_LOG be set in order - to log any data. This specifies the file that timing data should be logged - to. 
That variable can include strftime interpolation variables, - which will be replaced with the start time of the play. - """ - def log_play(self, playbook_name, playbook_timestamp, results): - # N.B. This is intended to provide a consistent interface and message - # format across all of Open edX tooling, so it deliberately eschews - # standard python logging infrastructure. - if ANSIBLE_TIMER_LOG is None: - return - - messages = [] - for name, timestamp in results.items(): - messages.append({ - 'task': name, - 'playbook': playbook_name, - 'started_at': timestamp.start.isoformat(), - 'ended_at': timestamp.end.isoformat(), - 'duration': timestamp.duration.total_seconds(), - }) - - messages.append({ - 'playbook': playbook_name, - 'started_at': playbook_timestamp.start.isoformat(), - 'ended_at': playbook_timestamp.end.isoformat(), - 'duration': playbook_timestamp.duration.total_seconds(), - }) - - log_path = playbook_timestamp.start.strftime(ANSIBLE_TIMER_LOG) - - try: - log_dir = dirname(log_path) - if log_dir and not exists(log_dir): - os.makedirs(log_dir) - - with open(log_path, 'a') as outfile: - for log_message in messages: - json.dump( - log_message, - outfile, - separators=(',', ':'), - sort_keys=True, - ) - outfile.write('\n') - except Exception: - LOGGER.exception("Unable to write json timing log messages") - - -class LoggingTimingLogger(TimingLogger): - """ - Log timing information for the play and the top 10 tasks to stdout. - """ - def log_play(self, playbook_name, playbook_timestamp, results): - - # Sort the tasks by their running time - sorted_results = sorted( - results.items(), - key=lambda (task, timestamp): timestamp.duration, - reverse=True - ) - - for name, timestamp in sorted_results[:10]: - LOGGER.info( - "{0:-<80}{1:->8}".format( - ' {0} '.format(name), - ' {0:.02f}s'.format(timestamp.duration.total_seconds()), - ) - ) - - LOGGER.info( - "\nPlaybook %s finished: %s, %d total tasks. %s elapsed. 
\n", - playbook_name, - playbook_timestamp.end, - len(results), - playbook_timestamp.duration, - ) - - -class CallbackModule(CallbackBase): - - """ - Ansible plugin get the time of each task and total time - to run the complete playbook - """ - def __init__(self): - self.stats = collections.defaultdict(list) - self.current_task = None - self.playbook_name = None - self.playbook_timestamp = None - self.play = None - - self.loggers = [ - DatadogTimingLogger(), - LoggingTimingLogger(), - JsonTimingLogger(), - ] - - def v2_playbook_on_play_start(self, play): - self.play = play - super(CallbackModule, self).v2_playbook_on_play_start(play) - - def playbook_on_play_start(self, pattern): - """ - Record the start of a play. - """ - self.playbook_name, _ = splitext( - basename(self.play.get_name()) - ) - self.playbook_timestamp = Timestamp() - - def playbook_on_task_start(self, name, is_conditional): - """ - Logs the start of each task - """ - - if self.current_task is not None: - # Record the running time of the last executed task - self.stats[self.current_task][-1].stop() - - # Record the start time of the current task - self.current_task = name - self.stats[self.current_task].append(Timestamp()) - - def playbook_on_stats(self, stats): - """ - Prints the timing of each task and total time to - run the complete playbook - """ - # Record the timing of the very last task, we use it here, because we - # don't have stop task function by default - if self.current_task is not None: - self.stats[self.current_task][-1].stop() - - self.playbook_timestamp.stop() - - # Flatten the stats so that multiple runs of the same task get listed - # individually. 
- flat_stats = {} - for task_name, task_runs in self.stats.iteritems(): - if len(task_runs) == 1: - flat_stats[task_name] = task_runs[0] - else: - for i, run in enumerate(task_runs): - run_name = "{} [{}]".format(task_name, i) - flat_stats[run_name] = run - - for logger in self.loggers: - logger.log_play( - self.playbook_name, - self.playbook_timestamp, - flat_stats, - ) diff --git a/playbooks/certs.yml b/playbooks/certs.yml index 9846765e3fa..5d2b2672eb1 100644 --- a/playbooks/certs.yml +++ b/playbooks/certs.yml @@ -6,7 +6,8 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - certs - role: datadog when: COMMON_ENABLE_DATADOG diff --git a/playbooks/cluster_rabbitmq.yml b/playbooks/cluster_rabbitmq.yml index ee36e52ff15..ea5e5ce6eb4 100644 --- a/playbooks/cluster_rabbitmq.yml +++ b/playbooks/cluster_rabbitmq.yml @@ -13,7 +13,7 @@ serial_count: 1 serial: "{{ serial_count }}" pre_tasks: - - action: ec2_facts + - action: ec2_metadata_facts - debug: var: "{{ ansible_ec2_instance_id }}" when: elb_pre_post diff --git a/playbooks/commoncluster.yml b/playbooks/commoncluster.yml index d53ee259e11..28a2ede2dc9 100644 --- a/playbooks/commoncluster.yml +++ b/playbooks/commoncluster.yml @@ -12,7 +12,7 @@ serial_count: 1 serial: "{{ serial_count }}" pre_tasks: - - action: ec2_facts + - action: ec2_metadata_facts when: elb_pre_post - debug: var="{{ ansible_ec2_instance_id }}" when: elb_pre_post @@ -26,7 +26,8 @@ become: False when: elb_pre_post roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: datadog when: COMMON_ENABLE_DATADOG - role: splunkforwarder diff --git a/playbooks/conductor.yml b/playbooks/conductor.yml new file mode 100644 index 00000000000..11522c137fa --- /dev/null +++ b/playbooks/conductor.yml @@ -0,0 +1,25 @@ +- name: Deploy conductor (router for learner portal) + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'conductor' + 
NGINX_OVERRIDE_DEFAULT_MAP_HASH_SIZE: True + NGINX_MAP_HASH_MAX_SIZE: 4096 + NGINX_MAP_HASH_BUCKET_SIZE: 128 + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_app_dir: "/etc/nginx" + nginx_sites: + - conductor + nginx_default_sites: + - conductor + CONDUCTOR_NGINX_PORT: 8000 + - role: conductor + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/continuous_delivery/cleanup.yml b/playbooks/continuous_delivery/cleanup.yml index 851613fcb39..bbe51dc4bff 100644 --- a/playbooks/continuous_delivery/cleanup.yml +++ b/playbooks/continuous_delivery/cleanup.yml @@ -10,9 +10,6 @@ # # Other variables # - ec2_region - The region used to create the AMI -# - hipchat_token - API token to send messages to hipchat -# - hipchat_room - ID or name of the room to send the notification -# - hipchat_url - URL of the hipchat API (defaults to v1 of the api) # # Example command line to run this playbook: # ansible-playbook -vvvv -i "localhost," -c local \ @@ -26,7 +23,6 @@ ec2_region: us-east-1 ec2_timeout: 300 artifact_path: /tmp/ansible-runtime - hipchat_url: https://api.hipchat.com/v2/ gather_facts: False connection: local tasks: diff --git a/playbooks/continuous_delivery/create_ami.yml b/playbooks/continuous_delivery/create_ami.yml index f8a2e3f34f8..d7c9be1e309 100644 --- a/playbooks/continuous_delivery/create_ami.yml +++ b/playbooks/continuous_delivery/create_ami.yml @@ -17,9 +17,6 @@ # - ami_wait - (yes/no) should ansible pause while # - no_reboot - (yes/no) should the instance not be rebooted during AMI creation # - artifact_path - the path to where this ansible run stores the artifacts for the pipeline -# - hipchat_token - API token to send messages to hipchat -# - hipchat_room - ID or name of the room to send the notification -# - hipchat_url - URL of the hipchat API (defaults to v1 of the api) # - extra_name_identifier - Makes each AMI 
unique if desired - Default: 0 # - version_tags - A mapping of {app: [repo, version], ...}, used to generate # a "version:app = repo version" tag on the AMI @@ -43,7 +40,6 @@ ami_creation_timeout: 3600 no_reboot: no artifact_path: /tmp/ansible-runtime - hipchat_url: https://api.hipchat.com/v2/ extra_name_identifier: 0 gather_facts: False connection: local @@ -110,14 +106,3 @@ src: templates/local/ami_template.yml.j2 dest: "{{ artifact_path }}/ami.yml" mode: 0600 - - - name: Send Hipchat notification AMI is finished baking - hipchat: - api: "{{ hipchat_url }}" - token: "{{ hipchat_token }}" - room: "{{ hipchat_room }}" - msg: "Finished baking AMI for: {{ edx_environment }}-{{ deployment }}-{{ play }} \n - AMI-ID: {{ ami_register.image_id }} \n - " - ignore_errors: yes - when: hipchat_token is defined diff --git a/playbooks/continuous_delivery/rollback_migrations.yml b/playbooks/continuous_delivery/rollback_migrations.yml index a13b9bfe0a9..393bc6e2430 100644 --- a/playbooks/continuous_delivery/rollback_migrations.yml +++ b/playbooks/continuous_delivery/rollback_migrations.yml @@ -20,10 +20,7 @@ # - migration_result - the filename where the migration output is saved # - SUB_APPLICATION_NAME - used for migrations in edxapp {lms|cms}, must be specified # when APPLICATION_NAME is edxapp -# - EDX_PLATFORM_SETTINGS - The settings to use for the edx platform {aws|devstack} DEFAULT: aws -# - HIPCHAT_TOKEN - API token to send messages to hipchat -# - HIPCHAT_ROOM - ID or name of the room to send the notification -# - HIPCHAT_URL - URL of the hipchat API (defaults to v1 of the api) +# - EDX_PLATFORM_SETTINGS - The settings to use for the edx platform {production|devstack} DEFAULT: production # # Example command line to run this playbook: # ansible-playbook \ @@ -48,7 +45,7 @@ vars: COMMAND_PREFIX: ". 
{{ APPLICATION_PATH }}/{{ APPLICATION_NAME }}_env; DB_MIGRATION_USER={{ DB_MIGRATION_USER }} DB_MIGRATION_PASS='{{ DB_MIGRATION_PASS }}' /edx/bin/python.{{ APPLICATION_NAME }} /edx/bin/manage.{{ APPLICATION_NAME }} " - EDX_PLATFORM_SETTINGS: "aws" + EDX_PLATFORM_SETTINGS: "production" rollback_result: rollback_result.yml original_state: original_state.yml migration_plan: migration_plan.yml @@ -129,12 +126,3 @@ mode: 0700 with_items: - "{{ migration_files.stdout_lines }}" - - - name: Send Hipchat notification cleanup has finished - hipchat: - api: "{{ HIPCHAT_URL }}" - token: "{{ HIPCHAT_TOKEN }}" - room: "{{ HIPCHAT_ROOM }}" - msg: "Migrations have completed." - ignore_errors: yes - when: HIPCHAT_TOKEN is defined diff --git a/playbooks/continuous_delivery/run_management_command.yml b/playbooks/continuous_delivery/run_management_command.yml index 6ff1fc26a90..f0458b30bec 100644 --- a/playbooks/continuous_delivery/run_management_command.yml +++ b/playbooks/continuous_delivery/run_management_command.yml @@ -10,7 +10,7 @@ # - COMMAND - name of the management command to be run # # Other variables: -# - EDX_PLATFORM_SETTINGS - The settings to use for the edx platform {aws|devstack} DEFAULT: aws +# - EDX_PLATFORM_SETTINGS - The settings to use for the edx platform {production|devstack} DEFAULT: production # # Example command line to run this playbook: # ansible-playbook -vvvv -i "localhost," -c local \ @@ -22,7 +22,7 @@ - hosts: all vars: - EDX_PLATFORM_SETTINGS: "aws" + EDX_PLATFORM_SETTINGS: "production" COMMAND_PREFIX: " . 
{{ APPLICATION_PATH }}/{{ APPLICATION_NAME }}_env; /edx/bin/python.{{ APPLICATION_NAME }} /edx/bin/manage.{{ APPLICATION_NAME }}" gather_facts: False become: True diff --git a/playbooks/continuous_delivery/run_migrations.yml b/playbooks/continuous_delivery/run_migrations.yml index e301a9df7f4..3a8da41170e 100644 --- a/playbooks/continuous_delivery/run_migrations.yml +++ b/playbooks/continuous_delivery/run_migrations.yml @@ -17,14 +17,11 @@ # - DB_MIGRATION_PASS - the database password # # Other variables: -# - HIPCHAT_TOKEN - API token to send messages to hipchat -# - HIPCHAT_ROOM - ID or name of the room to send the notification -# - HIPCHAT_URL - URL of the hipchat API (defaults to v1 of the api) # - migration_plan - the filename where the unapplied migration YAML output is stored # - migration_result - the filename where the migration output is saved # - SUB_APPLICATION_NAME - used for migrations in edxapp {lms|cms}, must be specified # when APPLICATION_NAME is edxapp -# - EDX_PLATFORM_SETTINGS - The settings to use for the edx platform {aws|devstack} DEFAULT: aws +# - EDX_PLATFORM_SETTINGS - The settings to use for the edx platform {production|devstack} DEFAULT: production # # Example command line to run this playbook: # ansible-playbook -vvvv -i "localhost," -c local \ @@ -38,8 +35,7 @@ vars: migration_plan: migration_plan.yml migration_result: migration_result.yml - HIPCHAT_URL: https://api.hipchat.com/v2/ - EDX_PLATFORM_SETTINGS: "aws" + EDX_PLATFORM_SETTINGS: "production" COMMAND_PREFIX: " . 
{{ APPLICATION_PATH }}/{{ APPLICATION_NAME }}_env; DB_MIGRATION_USER={{ DB_MIGRATION_USER }} DB_MIGRATION_PASS={{ DB_MIGRATION_PASS }} /edx/bin/python.{{ APPLICATION_NAME }} /edx/bin/manage.{{ APPLICATION_NAME }}" vars_files: - roles/edxapp/defaults/main.yml @@ -87,12 +83,3 @@ mode: 0700 with_items: - "{{ migration_files.stdout_lines }}" - - - name: Send Hipchat notification cleanup has finished - hipchat: - api: "{{ HIPCHAT_URL }}" - token: "{{ HIPCHAT_TOKEN }}" - room: "{{ HIPCHAT_ROOM }}" - msg: "Migrations have completed." - ignore_errors: yes - when: HIPCHAT_TOKEN is defined diff --git a/playbooks/continuous_delivery/upload_assets.yml b/playbooks/continuous_delivery/upload_assets.yml new file mode 100644 index 00000000000..c4fd96eb20a --- /dev/null +++ b/playbooks/continuous_delivery/upload_assets.yml @@ -0,0 +1,46 @@ +# This playbook will upload assets from a django service to an S3 bucket +# +# +# Required variables for this playbook: +# +# - APPLICATION_PATH - the top-level path of the Django application; the application lives underneath +# this directory in a directory with the same name as APPLICATION_NAME. +# - APPLICATION_NAME - The name of the application that we are running against +# - APPLICATION_USER - user which is meant to run the application +# - BUCKET_PATH - name of the bucket to upload assets to +# +# Other variables: +# - EDX_PLATFORM_SETTINGS - The settings to use for the edx platform {production|devstack} DEFAULT: production +# +# Example command line to run this playbook: +# ansible-playbook -vvvv -i "localhost," -c local \ +# -e @overrides.yml \ +# upload_assets.yml +# + + +- hosts: all + vars: + EDX_PLATFORM_SETTINGS: "production" + # Both LMS and Studio gather their assets to the same directory, + # so most of the time leaving the default sub-application will be fine. + SUB_APPLICATION_NAME: "lms" + COMMAND_PREFIX: " . 
{{ APPLICATION_PATH }}/{{ APPLICATION_NAME }}_env; /edx/bin/python.{{ APPLICATION_NAME }} /edx/bin/manage.{{ APPLICATION_NAME }}" + STATIC_ROOT: >- + $({{ COMMAND_PREFIX }} shell --command "from django.conf import settings; print(getattr(settings, 'STATIC_ROOT', ''))") + STATIC_ROOT_EDXAPP: >- + $({{ COMMAND_PREFIX }} {{ SUB_APPLICATION_NAME }} shell --settings "{{ EDX_PLATFORM_SETTINGS }}" --command "from django.conf import settings; print(getattr(settings, 'STATIC_ROOT', ''))") + gather_facts: False + become: True + tasks: + + - name: sync assets to s3 + shell: 'aws s3 sync {{ STATIC_ROOT }} {{ BUCKET_PATH }}' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME != "edxapp" + + - name: sync edxapp assets to s3 + shell: 'aws s3 sync {{ STATIC_ROOT_EDXAPP }} {{ BUCKET_PATH }}' + become_user: "{{ APPLICATION_USER }}" + when: APPLICATION_NAME == "edxapp" diff --git a/playbooks/create_db_and_users.yml b/playbooks/create_db_and_users.yml index b400e2e4308..75fa30de134 100644 --- a/playbooks/create_db_and_users.yml +++ b/playbooks/create_db_and_users.yml @@ -79,6 +79,34 @@ login_user: "{{ item.login_user }}" login_password: "{{ item.login_password }}" append_privs: yes + when: item.mysql_plugin is not defined with_items: "{{ database_users }}" tags: - users + + # If plugin is AWSAuthenticationPlugin, then we can’t create mysql user using the mysql_user module + # to create user for AWS RDS IAM authentication as it does not support plugin as parameter + + - name: create mysql users for AWS RDS IAM authentication + shell: | + mysql -u"{{ item.login_user }}" -p"{{ item.login_password }}" -h"{{ item.login_host }}" -e "SET @sql := CASE WHEN (SELECT count(*) FROM mysql.user WHERE User='{{ item.name }}') = 0 THEN 'CREATE USER {{ item.name }} IDENTIFIED WITH AWSAuthenticationPlugin as \'RDS\'' ELSE 'Select 0' END;PREPARE stmt FROM @sql;EXECUTE stmt;DEALLOCATE PREPARE stmt" + when: item.mysql_plugin is defined and item.state == 'present' and item.mysql_plugin == 
'AWSAuthenticationPlugin' + with_items: "{{ database_users }}" + tags: + - users + + - name: assign privileges to AWS RDS IAM users + shell: | + mysql -u"{{ item.login_user }}" -p"{{ item.login_password }}" -h"{{ item.login_host }}" -e "GRANT {{ item.privileges }} to '{{ item.name }}'@'{{ item.host }}' REQUIRE SSL" + when: item.mysql_plugin is defined and item.state == 'present' and item.mysql_plugin == 'AWSAuthenticationPlugin' + with_items: "{{ database_users }}" + tags: + - users + + # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/mysql_rds_set_configuration.html + - name: Set binlog retention length + shell: | + mysql -u"{{ database_connection.login_user }}" -p"{{ database_connection.login_password }}" -h"{{ database_connection.login_host }}" -e "call mysql.rds_set_configuration('binlog retention hours', {{RDS_BINLOG_RETENTION_HOURS | default(168)}});" + when: RDS_BINLOG_RETENTION_HOURS is defined + tags: + - users diff --git a/playbooks/create_rds.yml b/playbooks/create_rds.yml new file mode 100644 index 00000000000..c356f47d166 --- /dev/null +++ b/playbooks/create_rds.yml @@ -0,0 +1,76 @@ +# This play will create an RDS for an application. +# It can be run like so: +# +# ansible-playbook -c local -i 'localhost,' create_rds.yml -e@./db.yml +# +# where the content of db.yml contains the following settings +# +# It can read from the same config as create_dbs_and_users.yml and needs this +# part of that config +# database_connection: +# login_host: "{{ database_name }}......rds.amazonaws.com" # This is pretty predictable for our accounts. +# login_user: "root" +# login_password: "" # max 41 characters, Only printable ASCII characters besides '/', '@', '\"', ' ' may be used. 
+# database_name: your-database-name +# database_size: number of gigabytes (integer) +# instance_type: Choose an AWS RDS instance type such as "db.t2.medium" +# aws_region: a full region (such as us-east-1 or us-west-2) not an AZ +# database_engine_version: You should use either the standard or the newest possible, such as "5.6.39" +# maintenance_window: UTC time and day of week to allow maintenance "Mon:16:00-Mon:16:30" +# vpc_security_groups: What security group in the VPC your RDS should belong to (this is separate from your app or elb SG) +# subnet_group: a name of a group in the RDS console that contains subnets, it will pick the appropriate one +# parameter_group: name of the parameter group with overridden defaults for this RDS +# backup_window: UTC time of the day to take a backup "08:00-08:30" +# backup_retention: Days to keep backups (integer) +# multi_zone: yes or no (whether this RDS is multi-az) +# performance_insights: yes or no (or unset) whether to enable Performance Insights (must be 5.6.40 or greater and not a t2) +# tags: "[{'Key': 'environment', 'Value': 'TBD'}, {'Key': 'deployment', 'Value': 'TBD'}, {'Key': 'deployment', 'Value': 'TBD'}]" + + +- name: Create RDS instance + hosts: all + gather_facts: False + tasks: + +# The rds module for ansible only uses boto2, and boto2 defaults to magnetic discs and will +# use io1 if you specify piops, but you can't have gp2. +# Adapted from https://github.com/ansible/ansible-modules-core/issues/633 +# which points you to the various other open github issues. 
+ + - name: Create RDS instance using SSD (gp2) storage + command: "aws rds create-db-instance + --db-instance-identifier {{ database_name }} + --storage-type gp2 + --allocated-storage {{ database_size }} + --db-instance-class {{ instance_type }} + --engine {{ database_engine|default('MySQL') }} + --engine-version {{ database_engine_version }} + --master-username {{ database_connection.login_user }} + --master-user-password {{ database_connection.login_password }} + --vpc-security-group-ids {{ vpc_security_groups}} + --db-subnet-group-name {{ subnet_group }} + --preferred-maintenance-window {{ maintenance_window }} + --db-parameter-group-name {{ parameter_group }} + --backup-retention-period {{ backup_retention }} + --preferred-backup-window {{ backup_window }} + --{{ '' if multi_zone == 'yes' else 'no-' }}multi-az + {{ '--enable-performance-insights' if performance_insights is defined and performance_insights == 'yes' else '' }} + --tags '{{ tags }}' + " + register: result + failed_when: > + result.rc != 0 and ('DBInstanceAlreadyExists' not in result.stderr) + changed_when: "result.rc == 0" + + - name: Wait RDS to be available + rds: + command: facts + region: "{{ aws_region }}" + instance_name: "{{ database_name }}" + register: result + until: result.instance.status == "available" + retries: 20 + delay: 60 + +- include: create_db_and_users.yml + when: database_connection.login_host is defined diff --git a/playbooks/create_rds_secondary.yml b/playbooks/create_rds_secondary.yml deleted file mode 100644 index 95f40ac1080..00000000000 --- a/playbooks/create_rds_secondary.yml +++ /dev/null @@ -1,34 +0,0 @@ -# Usage: AWS_PROFILE=myprofile ansible-playbook create_csmh_db.yml -i localhost, -e 'from_db=my-rds-identifier rds_name=env-dep-edxapphistory' - -- name: Create new edxapp history RDS instance - hosts: all - connection: local - gather_facts: false - vars: - from_db: null - rds_name: null - region: us-east-1 - instance_type: db.m4.large - env: null - app: edxapp - - 
tasks: - - name: Validate arguments - fail: - msg: "One or more arguments were not set correctly: {{ item }}" - when: not item - with_items: - - from_db - - rds_name - - - name: Create edxapp history RDS instance - rds: - command: replicate - instance_name: "{{ rds_name }}" - source_instance: "{{ from_db }}" - region: "{{ region }}" - instance_type: "{{ instance_type }}" - publicly_accessible: no - wait: yes - wait_timeout: 900 - register: created_db diff --git a/playbooks/credentials.yml b/playbooks/credentials.yml index a26bc5d197a..625d6ea8341 100644 --- a/playbooks/credentials.yml +++ b/playbooks/credentials.yml @@ -7,7 +7,8 @@ ENABLE_NEWRELIC: False CLUSTER_NAME: 'credentials' roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_default_sites: - credentials @@ -20,3 +21,6 @@ when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE - role: datadog-uninstall when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'credentials' + when: CREDENTIALS_HERMES_ENABLED diff --git a/playbooks/deploy_nginx_all_roles.yml b/playbooks/deploy_nginx_all_roles.yml index c0897ecc870..5a3bdc1e2a8 100644 --- a/playbooks/deploy_nginx_all_roles.yml +++ b/playbooks/deploy_nginx_all_roles.yml @@ -8,7 +8,8 @@ - roles/xserver/defaults/main.yml roles: - common - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_sites: - cms diff --git a/playbooks/designer.yml b/playbooks/designer.yml new file mode 100644 index 00000000000..ada06ab1eb5 --- /dev/null +++ b/playbooks/designer.yml @@ -0,0 +1,21 @@ +- name: Deploy edX designer + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: True + CLUSTER_NAME: 'designer' + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_default_sites: + - designer + - designer + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: hermes + HERMES_TARGET_SERVICE: 
'designer' + when: DESIGNER_HERMES_ENABLED diff --git a/playbooks/discovery.yml b/playbooks/discovery.yml index bd45e75a0eb..eb0717c0823 100644 --- a/playbooks/discovery.yml +++ b/playbooks/discovery.yml @@ -7,7 +7,8 @@ ENABLE_NEWRELIC: False CLUSTER_NAME: 'discovery' roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_default_sites: - discovery @@ -20,4 +21,7 @@ when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE - role: datadog-uninstall when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'discovery' + when: DISCOVERY_HERMES_ENABLED diff --git a/playbooks/ec2.py b/playbooks/ec2.py index a5315f951b5..f66adc9a21a 100755 --- a/playbooks/ec2.py +++ b/playbooks/ec2.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -''' +""" EC2 external inventory script ================================= @@ -87,7 +87,7 @@ Security groups are comma-separated in 'ec2_security_group_ids' and 'ec2_security_group_names'. -''' +""" # (c) 2012, Peter Sankauskas # @@ -108,6 +108,8 @@ ###################################################################### +from __future__ import absolute_import +from __future__ import print_function import sys import os import argparse @@ -117,8 +119,10 @@ from boto import ec2 from boto import rds from boto import route53 -import ConfigParser +import six.moves.configparser import traceback +import six +from six.moves import range try: import json @@ -127,12 +131,15 @@ class Ec2Inventory(object): + def _empty_inventory(self): + return {"_meta": {"hostvars": {}}} + def __init__(self): ''' Main execution path ''' # Inventory grouped by instance IDs, tags, security groups, regions, # and availability zones - self.inventory = {} + self.inventory = self._empty_inventory() # Index of hostname (address) to instance ID self.index = {} @@ -146,7 +153,6 @@ def __init__(self): self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() - # Data to print if self.args.host: data_to_print = 
self.get_host_info() @@ -157,7 +163,7 @@ def __init__(self): data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) - print data_to_print + print(data_to_print) def is_cache_valid(self): @@ -181,7 +187,7 @@ def is_cache_valid(self): def read_settings(self): ''' Reads the settings from the ec2.ini file ''' - config = ConfigParser.SafeConfigParser() + config = six.moves.configparser.SafeConfigParser() config.read(self.args.inifile) # is eucalyptus? @@ -247,7 +253,7 @@ def parse_cli_args(self): help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, + parser.add_argument('--refresh-cache', action='store_true', default=True, help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') default_inifile = os.environ.get("ANSIBLE_EC2_INI", os.path.dirname(os.path.realpath(__file__))+'/ec2.ini') @@ -290,23 +296,23 @@ def get_instances_by_region(self, region): # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported if conn is None: - print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) + print(("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region)) sys.exit(1) reservations = conn.get_all_instances() for reservation in reservations: - instances = sorted(reservation.instances) + instances = sorted(reservation.instances, key=lambda x: x.id) for instance in instances: self.add_instance(instance, region) except boto.exception.BotoServerError as e: if not self.eucalyptus: - print "Looks like AWS is down again:" - print e + print("Looks like AWS is down again:") + print(e) sys.exit(1) def get_rds_instances_by_region(self, region): - ''' Makes an AWS API call to the list of RDS instances in a particular + ''' Makes an AWS API call to the list of RDS instances in a particular region ''' try: @@ -316,8 +322,8 @@ def get_rds_instances_by_region(self, region): for instance in instances: self.add_rds_instance(instance, region) except boto.exception.BotoServerError as e: - print "Looks like AWS RDS is down: " - print e + print("Looks like AWS RDS is down: ") + print(e) sys.exit(1) def get_instance(self, region, instance_id): @@ -330,7 +336,7 @@ def get_instance(self, region, instance_id): # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported if conn is None: - print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) + print(("region name: %s likely not supported, or AWS is down. connection to region failed." % region)) sys.exit(1) reservations = conn.get_all_instances([instance_id]) @@ -382,12 +388,12 @@ def add_instance(self, instance, region): key = self.to_safe("security_group_" + group.name) self.push(self.inventory, key, dest) except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' 
+ print('Package boto seems a bit older.') + print('Please upgrade boto >= 2.3.0.') sys.exit(1) # Inventory: Group by tag keys - for k, v in instance.tags.iteritems(): + for k, v in six.iteritems(instance.tags): key = self.to_safe("tag_" + k + "=" + v) self.push(self.inventory, key, dest) self.keep_first(self.inventory, 'first_in_' + key, dest) @@ -439,8 +445,8 @@ def add_rds_instance(self, instance, region): key = self.to_safe("security_group_" + instance.security_group.name) self.push(self.inventory, key, dest) except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' + print('Package boto seems a bit older.') + print('Please upgrade boto >= 2.3.0.') sys.exit(1) # Inventory: Group by engine @@ -519,18 +525,17 @@ def get_host_info(self): for key in vars(instance): value = getattr(instance, key) key = self.to_safe('ec2_' + key) - # Handle complex types if type(value) in [int, bool]: instance_vars[key] = value - elif type(value) in [str, unicode]: + elif type(value) in [str, six.text_type]: instance_vars[key] = value.strip() elif type(value) == type(None): instance_vars[key] = '' elif key == 'ec2_region': instance_vars[key] = value.name elif key == 'ec2_tags': - for k, v in value.iteritems(): + for k, v in six.iteritems(value): key = self.to_safe('ec2_tag_' + k) instance_vars[key] = v elif key == 'ec2_groups': @@ -615,7 +620,7 @@ def json_format_dict(self, data, pretty=False): # Run the script RETRIES = 3 -for _ in xrange(RETRIES): +for _ in range(RETRIES): try: Ec2Inventory() break diff --git a/playbooks/ecommerce.yml b/playbooks/ecommerce.yml index 17903e59b90..1fd78ca8692 100644 --- a/playbooks/ecommerce.yml +++ b/playbooks/ecommerce.yml @@ -7,7 +7,8 @@ ENABLE_NEWRELIC: False CLUSTER_NAME: 'ecommerce' roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_default_sites: - ecommerce @@ -20,4 +21,7 @@ when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE - role: datadog-uninstall when: not 
COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'ecommerce' + when: ECOMMERCE_HERMES_ENABLED diff --git a/playbooks/ecomworker.yml b/playbooks/ecomworker.yml index 0af7c1e5aeb..f7c288770bb 100644 --- a/playbooks/ecomworker.yml +++ b/playbooks/ecomworker.yml @@ -6,7 +6,8 @@ ENABLE_DATADOG: False ENABLE_NEWRELIC: False roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - ecomworker - role: datadog when: COMMON_ENABLE_DATADOG @@ -16,4 +17,7 @@ when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE - role: datadog-uninstall when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'ecomworker' + when: ECOMMERCE_WORKER_HERMES_ENABLED diff --git a/playbooks/edx-stateless.yml b/playbooks/edx-stateless.yml deleted file mode 100644 index 2524b5d9bf7..00000000000 --- a/playbooks/edx-stateless.yml +++ /dev/null @@ -1,91 +0,0 @@ ---- - -# Stateless app server configuration, designed to be used with external mysql, -# mongo, rabbitmq, and elasticsearch services. - -- name: Bootstrap instance(s) - hosts: all - gather_facts: no - become: True - roles: - - python - -- name: Configure instance(s) - hosts: all - become: True - gather_facts: True - - vars: - migrate_db: 'yes' - openid_workaround: True - EDXAPP_LMS_NGINX_PORT: '80' - ENABLE_ECOMMERCE: False # Disable ecommerce by default - roles: - - # Ensure we have no known security vulnerabilities - - security - - # Server setup - - swapfile - - # Nginx reverse proxy - - role: nginx - nginx_sites: - - certs - - cms - - lms - - forum - - xqueue - nginx_default_sites: - - lms - - # Main EdX application - # https://github.com/edx/edx-platform - - role: edxapp - celery_worker: True - - edxapp - - # Discussion forums - # https://github.com/edx/cs_comments_service - - forum - - # Notifications service - # https://github.com/edx/notifier - - role: notifier - NOTIFIER_DIGEST_TASK_INTERVAL: '5' - - # XQueue interface for communicating with external grader services - # https://github.com/edx/xqueue - - role: 
xqueue - update_users: True - - # Certificate generation - # https://github.com/edx/edx-certificates - - certs - - # Email sending - - postfix_queue - - # Ecommerce (optional) - # https://github.com/edx/ecommerce - - role: nginx - nginx_sites: - - ecommerce - when: ENABLE_ECOMMERCE - - role: ecommerce - when: ENABLE_ECOMMERCE - - role: ecomworker - when: ENABLE_ECOMMERCE - - # memcached - - role: memcache - when: "'localhost' in ' '.join(EDXAPP_MEMCACHE)" - - # Optional extras - - role: datadog - when: COMMON_ENABLE_DATADOG - - role: splunkforwarder - when: COMMON_ENABLE_SPLUNKFORWARDER - - role: datadog-uninstall - when: not COMMON_ENABLE_DATADOG - diff --git a/playbooks/edx_continuous_integration.yml b/playbooks/edx_continuous_integration.yml index 8861b1dabd3..6a1b1e9ac51 100644 --- a/playbooks/edx_continuous_integration.yml +++ b/playbooks/edx_continuous_integration.yml @@ -6,20 +6,22 @@ vars: migrate_db: "yes" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_sites: - cms - lms - xqueue - - xserver + - learner_portal + - program_manager nginx_default_sites: - lms - mysql - role: edxlocal tags: edxlocal - memcache - - mongo_3_2 + - mongo_3_6 - { role: 'edxapp', celery_worker: True } - edxapp - testcourses @@ -29,15 +31,17 @@ - forum - { role: notifier, NOTIFIER_DIGEST_TASK_INTERVAL: "5" } - { role: "xqueue", update_users: True } - - role: xserver - when: XSERVER_GIT_IDENTITY|length > 0 - edx_ansible - analytics_api - ecommerce - credentials - discovery - - role: journals - when: JOURNALS_ENABLED + - role: registrar + when: REGISTRAR_ENABLED + - role: learner_portal + when: LEARNER_PORTAL_ENABLED + - role: program_manager + when: PROGRAM_MANAGER_ENABLED - oauth_client_setup - role: datadog when: COMMON_ENABLE_DATADOG diff --git a/playbooks/edx_jenkins_tests.yml b/playbooks/edx_jenkins_tests.yml index 33682f14c19..c0eae4ed87b 100644 --- a/playbooks/edx_jenkins_tests.yml +++ b/playbooks/edx_jenkins_tests.yml @@ -13,7 +13,8 @@ - "{{ 
secure_dir }}/vars/edx_jenkins_tests.yml" roles: - common - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_sites: - lms diff --git a/playbooks/edx_maintenance.yml b/playbooks/edx_maintenance.yml new file mode 100644 index 00000000000..89dc9f996af --- /dev/null +++ b/playbooks/edx_maintenance.yml @@ -0,0 +1,20 @@ +# Usage: +# +# By default this playbook will disable the maintenance mode +# +# Enable maintenance +# ansible-playbook ./edx_maintenance.yml -i host1.example.com,host2.example.com, -e '{"ENABLE_MAINTENANCE": True}' +# +# Disable maintenance +# ansible-playbook ./edx_maintenance.yml -i host1.example.com,host2.example.com, -e '{"ENABLE_MAINTENANCE": False}' +# ansible-playbook ./edx_maintenance.yml -i host1.example.com,host2.example.com, +# +- name: Deploy edxapp + hosts: all + become: True + gather_facts: True + vars_files: + - 'roles/nginx/defaults/main.yml' + - 'roles/supervisor/defaults/main.yml' + roles: + - role: edx_maintenance diff --git a/playbooks/edx_provision.yml b/playbooks/edx_provision.yml index 37fc74a725a..9ecafc4a217 100644 --- a/playbooks/edx_provision.yml +++ b/playbooks/edx_provision.yml @@ -72,7 +72,8 @@ roles: # rerun common to set the hostname, nginx to set basic auth - common - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - edx-sandbox - role: nginx nginx_sites: diff --git a/playbooks/edxapp.yml b/playbooks/edxapp.yml index e574677bd93..0927fd5715b 100644 --- a/playbooks/edxapp.yml +++ b/playbooks/edxapp.yml @@ -20,10 +20,9 @@ nginx_default_sites: "{{ EDXAPP_NGINX_DEFAULT_SITES }}" nginx_extra_sites: "{{ NGINX_EDXAPP_EXTRA_SITES }}" nginx_extra_configs: "{{ NGINX_EDXAPP_EXTRA_CONFIGS }}" - nginx_redirects: "{{ NGINX_EDXAPP_CUSTOM_REDIRECTS }}" nginx_skip_enable_sites: "{{ EDXAPP_NGINX_SKIP_ENABLE_SITES }}" - edxapp - - role: devstack_sqlite_fix + - role: sqlite_fix when: devstack is defined and devstack - role: datadog when: COMMON_ENABLE_DATADOG @@ -35,4 +34,20 @@ when: COMMON_ENABLE_MINOS - role: 
datadog-uninstall when: not COMMON_ENABLE_DATADOG - + - role: hermes + when: "EDXAPP_HERMES_ENABLED" + HERMES_JITTER: 600 + HERMES_ALLOWED_SUDO_COMMANDS: + - "/bin/cp {{ hermes_download_dir }}/lms.yml {{ COMMON_CFG_DIR }}/lms.yml" + - "/bin/cp {{ hermes_download_dir }}/studio.yml {{ COMMON_CFG_DIR }}/studio.yml" + - "/edx/app/edxapp/reload_lms_config.sh" + - "/edx/app/edxapp/reload_cms_config.sh" + HERMES_SERVICE_CONFIG: + - url: '{{ HERMES_REMOTE_FILE_LOCATION }}/{{ COMMON_ENVIRONMENT }}/lms.yml' + filename: '{{ hermes_download_dir }}/lms.yml' + command: "sudo /bin/cp {{ hermes_download_dir }}/lms.yml {{ COMMON_CFG_DIR }}/lms.yml && sudo /edx/app/edxapp/reload_lms_config.sh" + secret_key_files: "{{ HERMES_PRIVATE_KEYS_DICT | map('regex_replace','(.*)','/edx/app/hermes/hermes-\\1') | join(',') if HERMES_PRIVATE_KEYS_DICT is defined else None }}" + - url: '{{ HERMES_REMOTE_FILE_LOCATION }}/{{ COMMON_ENVIRONMENT }}/studio.yml' + filename: '{{ hermes_download_dir }}/studio.yml' + command: "sudo /bin/cp {{ hermes_download_dir }}/studio.yml {{ COMMON_CFG_DIR }}/studio.yml && sudo /edx/app/edxapp/reload_cms_config.sh" + secret_key_files: "{{ HERMES_PRIVATE_KEYS_DICT | map('regex_replace','(.*)','/edx/app/hermes/hermes-\\1') | join(',') if HERMES_PRIVATE_KEYS_DICT is defined else None }}" diff --git a/playbooks/elasticsearch.yml b/playbooks/elasticsearch.yml index 7133d5e75be..e5c3ff6954d 100644 --- a/playbooks/elasticsearch.yml +++ b/playbooks/elasticsearch.yml @@ -10,7 +10,7 @@ CLUSTER_NAME: "commoncluster" serial: "{{ serial_count }}" pre_tasks: - - action: ec2_facts + - action: ec2_metadata_facts when: elb_pre_post - debug: var: ansible_ec2_instance_id @@ -26,7 +26,8 @@ when: elb_pre_post roles: - common - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - elasticsearch post_tasks: - debug: diff --git a/playbooks/enterprise_catalog.yml b/playbooks/enterprise_catalog.yml new file mode 100644 index 00000000000..c773022b4dd --- /dev/null +++ 
b/playbooks/enterprise_catalog.yml @@ -0,0 +1,21 @@ +- name: Deploy edX enterprise catalog + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: True + CLUSTER_NAME: 'enterprise_catalog' + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_default_sites: + - enterprise_catalog + - enterprise_catalog + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: hermes + HERMES_TARGET_SERVICE: 'enterprise_catalog' + when: ENTERPRISE_CATALOG_HERMES_ENABLED diff --git a/playbooks/flower.yml b/playbooks/flower.yml index fd644a86e41..c84bc3f5b50 100644 --- a/playbooks/flower.yml +++ b/playbooks/flower.yml @@ -6,5 +6,6 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - flower diff --git a/playbooks/forum.yml b/playbooks/forum.yml index 287f6304e3c..8d356cfa336 100644 --- a/playbooks/forum.yml +++ b/playbooks/forum.yml @@ -7,7 +7,8 @@ CLUSTER_NAME: 'forum' serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_sites: - forum diff --git a/playbooks/ghost.yml b/playbooks/ghost.yml new file mode 100644 index 00000000000..e9f4398b636 --- /dev/null +++ b/playbooks/ghost.yml @@ -0,0 +1,11 @@ +- name: Install gh-ost + hosts: all + become: True + gather_facts: True + vars: + serial_count: 1 + serial: "{{ serial_count }}" + roles: + - common + - ghost + diff --git a/playbooks/go-agent-docker.yml b/playbooks/go-agent-docker.yml index ec51dcb911e..89c4acad39b 100644 --- a/playbooks/go-agent-docker.yml +++ b/playbooks/go-agent-docker.yml @@ -5,5 +5,6 @@ become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - go-agent-docker-server diff --git a/playbooks/go-server.yml b/playbooks/go-server.yml index 01e79d4fab1..361b897387b 100644 --- a/playbooks/go-server.yml +++ b/playbooks/go-server.yml @@ -2,14 +2,25 @@ # 
https://www.go.cd/ # # https://openedx.atlassian.net/wiki/spaces/EdxOps/pages/157526357/How+to+Upgrade+GoCD -# Example command: ansible-playbook -i , -c ssh go-server.yml -e@/path/to/secure/ansible/vars/admin/admin.yml -e@/path/to/secure/ansible/vars/admin/edx_admin.yml -e@/path/to/secure/ansible/vars/admin/pipeline/gocd.yml -D +# Example command: ansible-playbook -i , -c ssh go-server.yml -e@/path/to/secure/ansible/vars/admin/admin.yml -e@/path/to/secure/ansible/vars/admin/edx_admin.yml -e@/path/to/secure/ansible/vars/admin/pipeline/gocd.yml -CD + +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - role: python + tags: + - install + - install:system-requirements - name: Install go-server hosts: all become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - go-server - role: splunkforwarder when: COMMON_ENABLE_SPLUNKFORWARDER diff --git a/playbooks/insights.yml b/playbooks/insights.yml index cf21e7e204d..1add0ec6555 100644 --- a/playbooks/insights.yml +++ b/playbooks/insights.yml @@ -7,7 +7,8 @@ ENABLE_NEWRELIC: True CLUSTER_NAME: 'insights' roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_sites: - insights @@ -20,4 +21,7 @@ when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE - role: datadog-uninstall when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'insights' + when: INSIGHTS_HERMES_ENABLED diff --git a/playbooks/jenkins_admin.yml b/playbooks/jenkins_admin.yml index bce68312564..72f99a46c3f 100644 --- a/playbooks/jenkins_admin.yml +++ b/playbooks/jenkins_admin.yml @@ -13,7 +13,8 @@ COMMON_SECURITY_UPDATES: yes SECURITY_UPGRADE_ON_ANSIBLE: true roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - jenkins_admin # This requires an override of the following form: # SPLUNKFORWARDER_LOG_ITEMS: diff --git a/playbooks/jenkins_build.yml b/playbooks/jenkins_build.yml index a2ef882b08f..52d3a267cb7 100644 --- a/playbooks/jenkins_build.yml +++ 
b/playbooks/jenkins_build.yml @@ -17,7 +17,8 @@ SECURITY_UPGRADE_ON_ANSIBLE: true roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: datadog when: COMMON_ENABLE_DATADOG - role: datadog-uninstall diff --git a/playbooks/jenkins_build_bastion.yml b/playbooks/jenkins_build_bastion.yml new file mode 100644 index 00000000000..3f25b9d984b --- /dev/null +++ b/playbooks/jenkins_build_bastion.yml @@ -0,0 +1,25 @@ +--- +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python + +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + vars: + COMMON_ENABLE_DATADOG: False + COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE: True + COMMON_SECURITY_UPDATES: yes + SECURITY_UPGRADE_ON_ANSIBLE: true + + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + tags: + - newreliconly diff --git a/playbooks/jenkins_data_engineering.yml b/playbooks/jenkins_data_engineering.yml new file mode 100644 index 00000000000..0da91516110 --- /dev/null +++ b/playbooks/jenkins_data_engineering.yml @@ -0,0 +1,32 @@ +--- +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python + +- name: Mount EBS + hosts: all + become: True + vars: + volumes: "{{ JENKINS_VOLUMES }}" + roles: + - mount_ebs + +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + vars: + COMMON_ENABLE_SPLUNKFORWARDER: False + COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE: True + COMMON_SECURITY_UPDATES: yes + SECURITY_UPGRADE_ON_ANSIBLE: true + + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - jenkins_data_engineering + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/jenkins_data_engineering_new.yml b/playbooks/jenkins_data_engineering_new.yml new file mode 100644 index 00000000000..ecdd0c33ad5 --- /dev/null +++ b/playbooks/jenkins_data_engineering_new.yml @@ -0,0 +1,32 
@@ +--- +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python + +- name: Mount EBS + hosts: all + become: True + vars: + volumes: "{{ JENKINS_VOLUMES }}" + roles: + - mount_ebs + +- name: Configure instance(s) + hosts: all + become: True + gather_facts: True + vars: + COMMON_ENABLE_SPLUNKFORWARDER: False + COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE: True + COMMON_SECURITY_UPDATES: yes + SECURITY_UPGRADE_ON_ANSIBLE: true + + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - jenkins_data_engineering_new + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/jenkins_de.yml b/playbooks/jenkins_it.yml similarity index 55% rename from playbooks/jenkins_de.yml rename to playbooks/jenkins_it.yml index f0d86bf8861..3a34b4a3669 100644 --- a/playbooks/jenkins_de.yml +++ b/playbooks/jenkins_it.yml @@ -11,15 +11,12 @@ become: True gather_facts: True vars: - COMMON_ENABLE_DATADOG: True - COMMON_ENABLE_SPLUNKFORWARDER: False + COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE: True COMMON_SECURITY_UPDATES: yes SECURITY_UPGRADE_ON_ANSIBLE: true roles: - - aws - - role: datadog - when: COMMON_ENABLE_DATADOG - - role: datadog-uninstall - when: not COMMON_ENABLE_DATADOG - - jenkins_de + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - docker-tools + - jenkins_it diff --git a/playbooks/jenkins_testeng_master.yml b/playbooks/jenkins_testeng_master.yml index 3a2aade668c..f69eef5642a 100644 --- a/playbooks/jenkins_testeng_master.yml +++ b/playbooks/jenkins_testeng_master.yml @@ -49,7 +49,8 @@ followSymlink: false roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: datadog when: COMMON_ENABLE_DATADOG - role: datadog-uninstall diff --git a/playbooks/jenkins_worker.yml b/playbooks/jenkins_worker.yml index e010ab8dffb..ef43c1f1494 100644 --- a/playbooks/jenkins_worker.yml +++ b/playbooks/jenkins_worker.yml @@ -13,6 +13,9 @@ COMMON_SECURITY_UPDATES: yes SECURITY_UPGRADE_ON_ANSIBLE: true MONGO_AUTH: 
false + SQLITE_AUTOCONF_URL: "https://www.sqlite.org/2019/sqlite-autoconf-3280000.tar.gz" + SQLITE_AUTOCONF_CREATED_PATH: "sqlite-autoconf-3280000" + SQLITE_FIX_PYTHON_PATH: "/home/jenkins/edx-venv/bin/python" serial: "{{ serial_count }}" vars_files: - roles/edxapp/defaults/main.yml @@ -20,10 +23,14 @@ - roles/xserver/defaults/main.yml - roles/forum/defaults/main.yml roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - docker-tools - mysql - edxlocal - memcache - mongo_3_2 - browsers - jenkins_worker + - sqlite_fix + - newrelic_infrastructure diff --git a/playbooks/jenkins_worker_android.yml b/playbooks/jenkins_worker_android.yml index 71a07441fdc..0de550bf8fa 100644 --- a/playbooks/jenkins_worker_android.yml +++ b/playbooks/jenkins_worker_android.yml @@ -9,9 +9,12 @@ vars: serial_count: 1 android_worker: True + jenkins_groups: "jenkins" COMMON_SECURITY_UPDATES: yes SECURITY_UPGRADE_ON_ANSIBLE: true serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - jenkins_worker + - newrelic_infrastructure diff --git a/playbooks/jenkins_worker_codejail.yml b/playbooks/jenkins_worker_codejail.yml new file mode 100644 index 00000000000..7e8cc66085a --- /dev/null +++ b/playbooks/jenkins_worker_codejail.yml @@ -0,0 +1,21 @@ +# Configure a Jenkins worker instance specifically for running tests for +# CodeJail, which requires specific set up with regards to python execution + +- name: Configure instance(s) + hosts: jenkins_worker + become: True + gather_facts: True + vars: + serial_count: 1 + codejail_worker: True + codejail_sandbox_caller: 'jenkins' + COMMON_SECURITY_UPDATES: yes + SECURITY_UPGRADE_ON_ANSIBLE: true + serial: "{{ serial_count }}" + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - docker-tools + - jenkins_worker + - codejail + - newrelic_infrastructure diff --git a/playbooks/jenkins_worker_loadtest_driver.yml b/playbooks/jenkins_worker_loadtest_driver.yml deleted file mode 100644 index 1cfba9f8a95..00000000000 
--- a/playbooks/jenkins_worker_loadtest_driver.yml +++ /dev/null @@ -1,14 +0,0 @@ -# Configure a Jenkins worker instance -# This has all the requirements to run load tests. - -- name: Configure instance(s) - hosts: jenkins_worker - become: True - gather_facts: True - vars: - loadtest_driver_worker: True - COMMON_SECURITY_UPDATES: yes - SECURITY_UPGRADE_ON_ANSIBLE: true - roles: - - aws - - jenkins_worker diff --git a/playbooks/journals.yml b/playbooks/journals.yml deleted file mode 100644 index c6ae88d9b76..00000000000 --- a/playbooks/journals.yml +++ /dev/null @@ -1,22 +0,0 @@ -- name: Deploy Journals - hosts: all - become: True - gather_facts: True - vars: - ENABLE_DATADOG: False - ENABLE_NEWRELIC: False - CLUSTER_NAME: 'journals' - roles: - - aws - - role: nginx - nginx_default_sites: - - journals - - journals - - role: datadog - when: COMMON_ENABLE_DATADOG - - role: splunkforwarder - when: COMMON_ENABLE_SPLUNKFORWARDER - - role: newrelic_infrastructure - when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE - - role: datadog-uninstall - when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/learner_portal.yml b/playbooks/learner_portal.yml new file mode 100644 index 00000000000..cab41f71b73 --- /dev/null +++ b/playbooks/learner_portal.yml @@ -0,0 +1,21 @@ +- name: Deploy learner_portal Frontend + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'learner-portal' + LEARNER_PORTAL_ENABLED: True + LEARNER_PORTAL_SANDBOX_BUILD: False + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_sites: + - learner_portal + LEARNER_PORTAL_NGINX_PORT: 8775 + - learner_portal + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/legacy_certificates.yml b/playbooks/legacy_certificates.yml index 6d8e4783c9f..6a538e6895e 100644 --- a/playbooks/legacy_certificates.yml +++ b/playbooks/legacy_certificates.yml @@ 
-7,7 +7,8 @@ serial_count: 1 repo_url: "git@github.com:edx/certificates.git" repo_path: "/opt/wwc/certificates" - certificates_version: "master" + CERTIFICATES_VERSION: "master" + certificates_version: "{{ CERTIFICATES_VERSION }}" git_ssh_script: "/opt/wwc/git.sh" serial: "{{ serial_count }}" tasks: diff --git a/playbooks/library/ec2_acl b/playbooks/library/ec2_acl index d72cc0f1eef..901196f449f 100644 --- a/playbooks/library/ec2_acl +++ b/playbooks/library/ec2_acl @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import absolute_import +from __future__ import print_function DOCUMENTATION = """ --- module: ec2_acl @@ -54,7 +56,7 @@ import sys try: import boto.vpc except ImportError: - print "failed=True msg={0}".format(sys.executable) + print("failed=True msg={0}".format(sys.executable)) #print "failed=True msg='boto required for this module'" sys.exit(1) @@ -258,7 +260,7 @@ def main(): if region: try: connection = boto.vpc.connect_to_region(region, profile_name=profile) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") diff --git a/playbooks/library/ec2_group_local b/playbooks/library/ec2_group_local index ec029593090..4a66c41377c 100644 --- a/playbooks/library/ec2_group_local +++ b/playbooks/library/ec2_group_local @@ -2,6 +2,10 @@ # -*- coding: utf-8 -*- +from __future__ import absolute_import +from __future__ import print_function +import six +from functools import reduce DOCUMENTATION = ''' --- module: ec2_group @@ -103,7 +107,7 @@ EXAMPLES = ''' try: import boto.ec2 except ImportError: - print "failed=True msg='boto required for this module'" + print("failed=True msg='boto required for this module'") sys.exit(1) @@ -239,7 +243,7 @@ def main(): '''found a match, delete it''' try: group.delete() - except Exception, e: + except Exception as e: 
module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e)) else: group = None @@ -318,7 +322,7 @@ def main(): changed = True # Finally, remove anything left in the groupRules -- these will be defunct rules - for rule in groupRules.itervalues(): + for rule in six.itervalues(groupRules): for grant in rule.grants: grantGroup = None if grant.group_id: @@ -382,7 +386,7 @@ def main(): del groupRules[default_egress_rule] # Finally, remove anything left in the groupRules -- these will be defunct rules - for rule in groupRules.itervalues(): + for rule in six.itervalues(groupRules): for grant in rule.grants: grantGroup = None if grant.group_id: diff --git a/playbooks/library/ec2_iam_role b/playbooks/library/ec2_iam_role index cb339520247..490e7e48300 100644 --- a/playbooks/library/ec2_iam_role +++ b/playbooks/library/ec2_iam_role @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import absolute_import +from __future__ import print_function DOCUMENTATION = """ --- module: ec2_iam_role @@ -54,7 +56,7 @@ import sys try: import boto except ImportError: - print "failed=True msg='boto required for this module'" + print("failed=True msg='boto required for this module'") sys.exit(1) def present(connection, module): @@ -151,7 +153,7 @@ def main(): try: connection = boto.connect_iam(profile_name=profile) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg = str(e)) state = module.params.get('state') diff --git a/playbooks/library/ec2_lookup b/playbooks/library/ec2_lookup index f3a6edddab2..92c3161d351 100644 --- a/playbooks/library/ec2_lookup +++ b/playbooks/library/ec2_lookup @@ -14,6 +14,9 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import absolute_import +from __future__ import print_function +import six DOCUMENTATION = ''' --- module: ec2_lookup @@ -82,7 +85,7 @@ try: import boto.ec2 from boto.ec2 import connect_to_region except ImportError: - print "failed=True msg='boto required for this module'" + print("failed=True msg='boto required for this module'") sys.exit(1) @@ -110,14 +113,14 @@ def main(): try: ec2 = connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) # If we specified an ec2_url then try connecting to it elif ec2_url: try: ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="Either region or ec2_url must be specified") @@ -125,11 +128,11 @@ def main(): instances = [] instance_ids = [] for res in ec2.get_all_instances(filters={'tag:' + tag: value - for tag, value in module.params.get('tags').iteritems()}): + for tag, value in six.iteritems(module.params.get('tags'))}): for inst in res.instances: if inst.state == "running": - instances.append({k: v for k, v in inst.__dict__.iteritems() - if isinstance(v, (basestring))}) + instances.append({k: v for k, v in six.iteritems(inst.__dict__) + if isinstance(v, (six.string_types))}) instance_ids.append(inst.id) module.exit_json(changed=False, instances=instances, instance_ids=instance_ids) diff --git a/playbooks/library/ec2_rt b/playbooks/library/ec2_rt index 9bd22350181..138754d19be 100644 --- a/playbooks/library/ec2_rt +++ b/playbooks/library/ec2_rt @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import absolute_import +from __future__ import print_function DOCUMENTATION = """ --- module: ec2_rt @@ -53,7 +55,7 @@ import sys try: import boto.vpc except ImportError: - print "failed=True msg={0}".format(sys.executable) + print("failed=True msg={0}".format(sys.executable)) sys.exit(1) @@ -211,7 +213,7 @@ def main(): if region: try: connection = boto.vpc.connect_to_region(region,profile_name=profile) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg = str(e)) else: module.fail_json(msg="region must be specified") diff --git a/playbooks/library/ec2_subnet b/playbooks/library/ec2_subnet index 345ac31fa7b..254c91cfb87 100644 --- a/playbooks/library/ec2_subnet +++ b/playbooks/library/ec2_subnet @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import absolute_import +from __future__ import print_function DOCUMENTATION = """ --- module: ec2_subnet @@ -57,7 +59,7 @@ import sys try: import boto.vpc except ImportError: - print "failed=True msg='boto required for this module'" + print("failed=True msg='boto required for this module'") sys.exit(1) from boto.exception import NoAuthHandlerFound @@ -204,7 +206,7 @@ def main(): if region: try: connection = boto.vpc.connect_to_region(region, profile_name=profile) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") diff --git a/playbooks/library/ec2_tag_local b/playbooks/library/ec2_tag_local index c634756cd93..f529a397f46 100644 --- a/playbooks/library/ec2_tag_local +++ b/playbooks/library/ec2_tag_local @@ -16,6 +16,8 @@ # edX: Edited to allow for variable tag names +from __future__ import absolute_import +from __future__ import print_function DOCUMENTATION = ''' --- module: ec2_tag @@ -94,7 +96,7 @@ import time try: import 
boto.ec2 except ImportError: - print "failed=True msg='boto required for this module'" + print("failed=True msg='boto required for this module'") sys.exit(1) def main(): diff --git a/playbooks/library/ec2_vpc_local b/playbooks/library/ec2_vpc_local index 73ebaba0721..8f457fec4eb 100644 --- a/playbooks/library/ec2_vpc_local +++ b/playbooks/library/ec2_vpc_local @@ -18,6 +18,7 @@ # https://github.com/ansible/ansible-modules-core/pull/1323 +from __future__ import absolute_import DOCUMENTATION = ''' --- module: ec2_vpc @@ -394,7 +395,7 @@ def create_vpc(module, vpc_conn): pending = False # sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs() # when that happens, just wait a bit longer and try again - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: if e.error_code != 'InvalidVpcID.NotFound': raise if pending: @@ -403,7 +404,7 @@ def create_vpc(module, vpc_conn): # waiting took too long module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime()) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) # Done with base VPC, now change to attributes and features. 
@@ -457,7 +458,7 @@ def create_vpc(module, vpc_conn): vpc_conn.create_tags(new_subnet.id, new_subnet_tags) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e)) # Now delete all absent subnets @@ -470,7 +471,7 @@ def create_vpc(module, vpc_conn): try: vpc_conn.delete_subnet(csubnet.id) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e)) # Handle Internet gateway (create/delete igw) @@ -486,7 +487,7 @@ def create_vpc(module, vpc_conn): vpc_conn.create_tags(igw.id, internet_gateway_tags) vpc_conn.attach_internet_gateway(igw.id, vpc.id) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e)) else: # Set igw variable to the current igw instance for use in route tables. 
@@ -497,7 +498,7 @@ def create_vpc(module, vpc_conn): vpc_conn.detach_internet_gateway(igws[0].id, vpc.id) vpc_conn.delete_internet_gateway(igws[0].id) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e)) # Handle route tables - this may be worth splitting into a @@ -563,7 +564,7 @@ def create_vpc(module, vpc_conn): all_route_tables.append(new_rt) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json( msg='Unable to create and associate route table {0}, error: ' \ '{1}'.format(rt, e) @@ -592,7 +593,7 @@ def create_vpc(module, vpc_conn): if not is_main: vpc_conn.delete_route_table(rt.id) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e)) vpc_dict = get_vpc_info(vpc) @@ -670,7 +671,7 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None): vpc_conn.delete_route_table(rt.id) vpc_conn.delete_vpc(vpc.id) - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json( msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e) ) @@ -716,7 +717,7 @@ def main(): region, **aws_connect_kwargs ) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg = str(e)) else: module.fail_json(msg="region must be specified") diff --git a/playbooks/library/mongodb_replica_set b/playbooks/library/mongodb_replica_set index 462c77f4459..8026d24ddfe 100644 --- a/playbooks/library/mongodb_replica_set +++ b/playbooks/library/mongodb_replica_set @@ -1,5 +1,10 @@ #!/usr/bin/env python +from __future__ import absolute_import +from six.moves import filter +from six.moves import map +from six.moves import range +from six.moves import zip DOCUMENTATION = """ --- module: mongodb_replica_set @@ -92,7 +97,7 @@ else: pymongo_found = True import json, copy -from 
urllib import quote_plus +from six.moves.urllib.parse import quote_plus from operator import itemgetter ########### Mongo API calls ########### @@ -282,7 +287,7 @@ def update_replset(rs_config): def get_mongo_uri(host, port, username, password, auth_database): mongo_uri = 'mongodb://' if username and password: - mongo_uri += "{}:{}@".format(*map(quote_plus, [username,password])) + mongo_uri += "{}:{}@".format(*list(map(quote_plus, [username,password]))) mongo_uri += "{}:{}".format(quote_plus(host), port) @@ -308,7 +313,7 @@ def primary_client(some_host, some_port, username, password, auth_database): status = client.admin.command("replSetGetStatus") # Find out who the primary is. - rs_primary = filter(lambda member: member['stateStr']=='PRIMARY', status['members'])[0] + rs_primary = [member for member in status['members'] if member['stateStr']=='PRIMARY'][0] primary_host, primary_port = rs_primary['name'].split(':') # Connect to the primary if this is not the primary. diff --git a/playbooks/library/mongodb_rs_config b/playbooks/library/mongodb_rs_config index 7d6b29a3264..02c9ed1fb22 100644 --- a/playbooks/library/mongodb_rs_config +++ b/playbooks/library/mongodb_rs_config @@ -1,5 +1,7 @@ #!/usr/bin/env python +from __future__ import absolute_import +from six.moves import map DOCUMENTATION = """ --- module: mongodb_rs_config @@ -59,7 +61,7 @@ else: pymongo_found = True import json -from urllib import quote_plus +from six.moves.urllib.parse import quote_plus def main(): @@ -87,7 +89,7 @@ def main(): module.fail_json(msg="Must provide both username and password or neither.") if username: - mongo_uri += "{}:{}@".format(*map(quote_plus, [username,password])) + mongo_uri += "{}:{}@".format(*list(map(quote_plus, [username,password]))) mongo_uri += "{}:{}".format(quote_plus(host),port) diff --git a/playbooks/library/mongodb_rs_status b/playbooks/library/mongodb_rs_status index 4dbd3b9f08c..45e39437620 100644 --- a/playbooks/library/mongodb_rs_status +++ 
b/playbooks/library/mongodb_rs_status @@ -1,5 +1,7 @@ #!/usr/bin/env python +from __future__ import absolute_import +from six.moves import map DOCUMENTATION = """ --- module: mongodb_rs_status @@ -67,7 +69,7 @@ else: pymongo_found = True import json -from urllib import quote_plus +from six.moves.urllib.parse import quote_plus def main(): @@ -95,7 +97,7 @@ def main(): module.fail_json(msg="Must provide both username and password or neither.") if username: - mongo_uri += "{}:{}@".format(*map(quote_plus, [username,password])) + mongo_uri += "{}:{}@".format(*list(map(quote_plus, [username,password]))) mongo_uri += "{}:{}".format(quote_plus(host),port) diff --git a/playbooks/library/mongodb_step_down b/playbooks/library/mongodb_step_down index dda30b399fa..a531801b192 100644 --- a/playbooks/library/mongodb_step_down +++ b/playbooks/library/mongodb_step_down @@ -1,5 +1,8 @@ #!/usr/bin/env python +from __future__ import absolute_import +from six.moves import map +from six.moves import range DOCUMENTATION = """ --- module: mongodb_step_down @@ -58,7 +61,7 @@ else: pymongo_found = True import json -from urllib import quote_plus +from six.moves.urllib.parse import quote_plus def main(): @@ -86,7 +89,7 @@ def main(): module.fail_json(msg="Must provide both username and password or neither.") if username: - mongo_uri += "{}:{}@".format(*map(quote_plus, [username,password])) + mongo_uri += "{}:{}@".format(*list(map(quote_plus, [username,password]))) mongo_uri += "{}:{}".format(quote_plus(host),port) diff --git a/playbooks/library/rds_local b/playbooks/library/rds_local index 19f0bbe58d6..e9f58276efc 100644 --- a/playbooks/library/rds_local +++ b/playbooks/library/rds_local @@ -14,6 +14,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import absolute_import DOCUMENTATION = ''' --- module: rds @@ -334,19 +335,19 @@ class RDSConnection: def __init__(self, module, region, **aws_connect_params): try: self.connection = connect_to_aws(boto.rds, region, **aws_connect_params) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg=e.error_message) def get_db_instance(self, instancename): try: return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0]) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: return None def get_db_snapshot(self, snapshotid): try: return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0]) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: return None def create_db_instance(self, instance_name, size, instance_class, db_engine, @@ -356,63 +357,63 @@ class RDSConnection: result = self.connection.create_dbinstance(instance_name, size, instance_class, username, password, **params) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def create_db_instance_read_replica(self, instance_name, source_instance, **params): try: result = self.connection.createdb_instance_read_replica(instance_name, source_instance, **params) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def delete_db_instance(self, instance_name, **params): try: result = self.connection.delete_dbinstance(instance_name, **params) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def delete_db_snapshot(self, snapshot): try: result = self.connection.delete_dbsnapshot(snapshot) return RDSSnapshot(result) - except boto.exception.BotoServerError, e: + except 
boto.exception.BotoServerError as e: raise RDSException(e) def modify_db_instance(self, instance_name, **params): try: result = self.connection.modify_dbinstance(instance_name, **params) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def reboot_db_instance(self, instance_name, **params): try: result = self.connection.reboot_dbinstance(instance_name) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): try: result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def create_db_snapshot(self, snapshot, instance_name, **params): try: result = self.connection.create_dbsnapshot(snapshot, instance_name) return RDSSnapshot(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def promote_read_replica(self, instance_name, **params): try: result = self.connection.promote_read_replica(instance_name, **params) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) @@ -420,7 +421,7 @@ class RDS2Connection: def __init__(self, module, region, **aws_connect_params): try: self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg=e.error_message) def get_db_instance(self, instancename): @@ -428,9 +429,9 @@ class RDS2Connection: dbinstances = 
self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'] result = RDS2DBInstance(dbinstances[0]) return result - except boto.rds2.exceptions.DBInstanceNotFound, e: + except boto.rds2.exceptions.DBInstanceNotFound as e: return None - except Exception, e: + except Exception as e: raise e def get_db_snapshot(self, snapshotid): @@ -438,7 +439,7 @@ class RDS2Connection: snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots'] result = RDS2Snapshot(snapshots[0]) return result - except boto.rds2.exceptions.DBSnapshotNotFound, e: + except boto.rds2.exceptions.DBSnapshotNotFound as e: return None def create_db_instance(self, instance_name, size, instance_class, db_engine, @@ -447,63 +448,63 @@ class RDS2Connection: result = self.connection.create_db_instance(instance_name, size, instance_class, db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def create_db_instance_read_replica(self, instance_name, source_instance, **params): try: result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def delete_db_instance(self, instance_name, **params): try: result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError 
as e: raise RDSException(e) def delete_db_snapshot(self, snapshot): try: result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] return RDS2Snapshot(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def modify_db_instance(self, instance_name, **params): try: result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def reboot_db_instance(self, instance_name, **params): try: result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): try: result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def create_db_snapshot(self, snapshot, instance_name, **params): try: result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot'] return RDS2Snapshot(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def promote_read_replica(self, instance_name, **params): try: result = self.connection.promote_read_replica(instance_name, 
**params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) @@ -544,7 +545,7 @@ class RDSDBInstance: # ReadReplicaSourceDBInstanceIdentifier may or may not exist try: d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier - except Exception, e: + except Exception as e: d["replication_source"] = None return d @@ -679,7 +680,7 @@ def create_db_instance(module, conn): module.params.get('instance_type'), module.params.get('db_engine'), module.params.get('username'), module.params.get('password'), **params) changed = True - except RDSException, e: + except RDSException as e: module.fail_json(msg="Failed to create instance: %s" % e.message) if module.params.get('wait'): @@ -706,7 +707,7 @@ def replicate_db_instance(module, conn): try: result = conn.create_db_instance_read_replica(instance_name, source_instance, **params) changed = True - except RDSException, e: + except RDSException as e: module.fail_json(msg="Failed to create replica instance: %s " % e.message) if module.params.get('wait'): @@ -745,7 +746,7 @@ def delete_db_instance_or_snapshot(module, conn): result = conn.delete_db_instance(instance_name, **params) else: result = conn.delete_db_snapshot(snapshot) - except RDSException, e: + except RDSException as e: module.fail_json(msg="Failed to delete instance: %s" % e.message) # If we're not waiting for a delete to complete then we're all done @@ -755,12 +756,12 @@ def delete_db_instance_or_snapshot(module, conn): try: resource = await_resource(conn, result, 'deleted', module) module.exit_json(changed=True) - except RDSException, e: + except RDSException as e: if e.code == 'DBInstanceNotFound': module.exit_json(changed=True) else: module.fail_json(msg=e.message) - except Exception, e: + except Exception as e: module.fail_json(msg=str(e)) @@ -798,7 +799,7 @@ def 
modify_db_instance(module, conn): try: result = conn.modify_db_instance(instance_name, **params) - except RDSException, e: + except RDSException as e: module.fail_json(msg=e.message) if params.get('apply_immediately'): if new_instance_name: @@ -836,7 +837,7 @@ def promote_db_instance(module, conn): try: result = conn.promote_read_replica(instance_name, **params) changed = True - except RDSException, e: + except RDSException as e: module.fail_json(msg=e.message) else: changed = False @@ -861,7 +862,7 @@ def snapshot_db_instance(module, conn): try: result = conn.create_db_snapshot(snapshot, instance_name, **params) changed = True - except RDSException, e: + except RDSException as e: module.fail_json(msg=e.message) if module.params.get('wait'): @@ -886,7 +887,7 @@ def reboot_db_instance(module, conn): try: result = conn.reboot_db_instance(instance_name, **params) changed = True - except RDSException, e: + except RDSException as e: module.fail_json(msg=e.message) if module.params.get('wait'): @@ -917,7 +918,7 @@ def restore_db_instance(module, conn): try: result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params) changed = True - except RDSException, e: + except RDSException as e: module.fail_json(msg=e.message) if module.params.get('wait'): @@ -1005,7 +1006,7 @@ def validate_parameters(required_vars, valid_vars, module): # Convert tags dict to list of tuples that rds2 expects if 'tags' in params: - params['tags'] = module.params['tags'].items() + params['tags'] = list(module.params['tags'].items()) return params diff --git a/playbooks/library/util_map b/playbooks/library/util_map index f674d6eaa9d..ef5b60d2178 100755 --- a/playbooks/library/util_map +++ b/playbooks/library/util_map @@ -1,5 +1,6 @@ #!/usr/bin/env python +from __future__ import absolute_import DOCUMENTATION = """ --- module: util_map diff --git a/playbooks/lifecycle_inventory.py b/playbooks/lifecycle_inventory.py index 9429b7558c5..892ab723988 100755 --- 
a/playbooks/lifecycle_inventory.py +++ b/playbooks/lifecycle_inventory.py @@ -28,6 +28,8 @@ } """ +from __future__ import absolute_import +from __future__ import print_function import argparse import boto3 import json @@ -63,10 +65,20 @@ def get_instance_dict(self): return dict - def run(self): + def get_asgs(self): asg = boto3.client('autoscaling', region_name=self.region) - - groups = asg.describe_auto_scaling_groups()['AutoScalingGroups'] + asg_request = asg.describe_auto_scaling_groups() + asg_accumulator = asg_request['AutoScalingGroups'] + + while 'NextToken' in asg_request: + asg_request = asg.describe_auto_scaling_groups(NextToken=asg_request['NextToken']) + asg_accumulator.extend(asg_request['AutoScalingGroups']) + + return asg_accumulator + + def run(self): + + groups = self.get_asgs() instances = self.get_instance_dict() inventory = defaultdict(list) @@ -83,12 +95,12 @@ def run(self): inventory[group['AutoScalingGroupName'] + "_" + instance['LifecycleState'].replace(":","_")].append(private_ip_address) inventory[instance['LifecycleState'].replace(":","_")].append(private_ip_address) - print json.dumps(inventory, sort_keys=True, indent=2) + print(json.dumps(inventory, sort_keys=True, indent=2)) if __name__=="__main__": parser = argparse.ArgumentParser() - parser.add_argument('-r', '--region', help='The aws region to use when connecting.', default='us-east-1') + parser.add_argument('-r', '--region', help='The aws region to use when connecting.', default=environ.get('AWS_REGION', 'us-east-1')) parser.add_argument('-l', '--list', help='Ansible passes this, we ignore it.', action='store_true', default=True) args = parser.parse_args() diff --git a/playbooks/locust.yml b/playbooks/locust.yml index 3f4f20a6670..613f2279f80 100644 --- a/playbooks/locust.yml +++ b/playbooks/locust.yml @@ -4,5 +4,6 @@ become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - locust diff --git a/playbooks/manage_edxapp_users_and_groups.yml 
b/playbooks/manage_edxapp_users_and_groups.yml index 98c7a64a4a0..45b111f9fe8 100644 --- a/playbooks/manage_edxapp_users_and_groups.yml +++ b/playbooks/manage_edxapp_users_and_groups.yml @@ -74,16 +74,17 @@ # - hosts: all vars: + env_path: /edx/app/edxapp/edxapp_env python_path: /edx/bin/python.edxapp manage_path: /edx/bin/manage.edxapp ignore_user_creation_errors: no - deployment_settings: "{{ EDXAPP_SETTINGS | default('aws') }}" + deployment_settings: "{{ EDXAPP_SETTINGS | default('production') }}" vars_files: - roles/common_vars/defaults/main.yml tasks: - name: Manage groups shell: > - {{ python_path }} {{ manage_path }} lms --settings={{ deployment_settings }} + . {{env_path}} && {{ python_path }} {{ manage_path }} lms --settings={{ deployment_settings }} manage_group {{ item.name | quote }} {% if item.get('permissions', []) | length %}--permissions {{ item.permissions | default([]) | map('quote') | join(' ') }}{% endif %} {% if item.get('remove') %}--remove{% endif %} @@ -93,7 +94,7 @@ - name: Manage users shell: > - {{ python_path }} {{ manage_path }} lms --settings={{ deployment_settings }} + . 
{{env_path}} && {{ python_path }} {{ manage_path }} lms --settings={{ deployment_settings }} manage_user {{ item.username | quote }} {{ item.email | quote }} {% if item.get('groups', []) | length %}--groups {{ item.groups | default([]) | map('quote') | join(' ') }}{% endif %} {% if item.get('remove') %}--remove{% endif %} @@ -103,6 +104,6 @@ {% if item.get('initial_password_hash') %}--initial-password-hash {{ item.initial_password_hash | quote }}{% endif %} with_items: "{{ django_users }}" register: manage_users_result - failed_when: (manage_users_result | failed) and not (ignore_user_creation_errors | bool) + failed_when: (manage_users_result is failed) and not (ignore_user_creation_errors | bool) become: true become_user: "{{ common_web_user }}" diff --git a/playbooks/masters_sandbox.yml b/playbooks/masters_sandbox.yml new file mode 100644 index 00000000000..b7eb99e523b --- /dev/null +++ b/playbooks/masters_sandbox.yml @@ -0,0 +1,78 @@ +# Creates initial state for a master's integration environment sandbox + +- name: setup master's integration environment + hosts: all + become: True + gather_facts: True + vars: + - edxapp_env_path: /edx/app/edxapp/edxapp_env + - edxapp_venv_dir: /edx/app/edxapp/venvs/edxapp + - edxapp_code_dir: /edx/app/edxapp/edx-platform + - registrar_env_path: /edx/app/registrar/registrar_env + - registrar_venv_dir: /edx/app/registrar/venvs/registrar + - registrar_code_dir: /edx/app/registrar/registrar + - lms_config_file: /edx/etc/lms.yml + - registrar_config_file: /edx/etc/registrar.yml + - jwt_signature_file: /tmp/lms_jwt_signature.yml + + tasks: + - name: create lms user + shell: . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms manage_user {{username}} {{email}} + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create dot application + shell: > + . 
{{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms create_dot_application "master's api client" {{username}} + --client-id {{organization_key}}-api-client-id --scopes=user_id + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create api access request + shell: > + . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms create_api_access_request {{username}} + --create-config --disconnect-signals + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create discovery site configuration + shell: > + . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms create_or_update_site_configuration {{dns_name}}.sandbox.edx.org + --configuration '{"COURSE_CATALOG_API_URL":"https://discovery-{{dns_name}}.sandbox.edx.org/api/v1","email_from_address":"edX "}' + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create LMS catalog integration + shell: > + . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms create_catalog_integrations --enabled --internal_api_url + https://discovery-{{dns_name}}.sandbox.edx.org --service_username discovery_worker --page_size 20 + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create LMS organization + shell: > + . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms + add_organization {{organization_key}} {{organization_key}} + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create registrar organization + shell: > + . {{ registrar_env_path }} && {{ registrar_venv_dir }}/bin/python manage.py create_organization {{organization_key}} + --group {{registrar_role}} + args: + chdir: "{{ registrar_code_dir }}" + + - name: create registrar user + shell: > + . 
{{ registrar_env_path }} && {{ registrar_venv_dir }}/bin/python manage.py create_user {{username}} --email {{email}} + --groups {{organization_key}}_{{registrar_role}} + args: + chdir: "{{ registrar_code_dir }}" + + - name: set up cron job to refresh lms cache + cron: + name: "refresh masters sandbox cache" + job: ". {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python {{ edxapp_code_dir }}/manage.py lms cache_programs" + hour: "0" + minute: "0" diff --git a/playbooks/masters_sandbox_update.yml b/playbooks/masters_sandbox_update.yml new file mode 100644 index 00000000000..5a471ecc88c --- /dev/null +++ b/playbooks/masters_sandbox_update.yml @@ -0,0 +1,59 @@ +# Updates master's sandbox environment with production data + +- name: update master's integration environment + hosts: all + become: True + gather_facts: True + vars: + - edxapp_env_path: /edx/app/edxapp/edxapp_env + - edxapp_venv_dir: /edx/app/edxapp/venvs/edxapp + - edxapp_code_dir: /edx/app/edxapp/edx-platform + - discovery_env_path: /edx/app/discovery/discovery_env + - discovery_venv_dir: /edx/app/discovery/venvs/discovery + - discovery_code_dir: /edx/app/discovery/discovery + - registrar_env_path: /edx/app/registrar/registrar_env + - registrar_venv_dir: /edx/app/registrar/venvs/registrar + - registrar_code_dir: /edx/app/registrar/registrar + - prod_catalog_host: https://discovery.edx.org + - prod_oauth_host: https://courses.edx.org + - instructor_username: staff@example.com + + tasks: + - name: setup edx partner + shell: > + . {{ discovery_env_path }} && {{ discovery_venv_dir }}/bin/python manage.py + create_or_update_partner --site-domain discovery-{{dns_name}}.sandbox.edx.org --code edx --name edX + args: + chdir: "{{ discovery_code_dir }}" + + - name: pull production discovery data + shell: > + . 
{{ discovery_env_path }} && {{ discovery_venv_dir }}/bin/python manage.py + load_program_fixture {{ program_uuids }} + --catalog-host {{ prod_catalog_host }} + --oauth-host {{ prod_oauth_host }} + --client-id {{ client_id }} + --client-secret {{ client_secret }} + args: + chdir: "{{ discovery_code_dir }}" + + - name: update LMS program cache + shell: > + . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms + cache_programs + args: + chdir: "{{ edxapp_code_dir }}" + + - name: create course shells in LMS + shell: > + . {{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py cms + sync_courses {{ instructor_username }} + args: + chdir: "{{ edxapp_code_dir }}" + + - name: load discovery programs into registrar + shell: > + . {{ registrar_env_path }} && {{ registrar_venv_dir }}/bin/python manage.py + manage_programs {{ program_uuids }} + args: + chdir: "{{ registrar_code_dir }}" diff --git a/playbooks/minos.yml b/playbooks/minos.yml index 2abffcbf16e..eca799dfe65 100644 --- a/playbooks/minos.yml +++ b/playbooks/minos.yml @@ -6,5 +6,6 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - minos diff --git a/playbooks/mongo.yml b/playbooks/mongo.yml index bd34d5c0599..3fed6ec11ab 100644 --- a/playbooks/mongo.yml +++ b/playbooks/mongo.yml @@ -3,7 +3,8 @@ become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - mongo - role: datadog when: COMMON_ENABLE_DATADOG diff --git a/playbooks/mongo_3_0.yml b/playbooks/mongo_3_0.yml index 8c38b670aab..dca352b65f9 100644 --- a/playbooks/mongo_3_0.yml +++ b/playbooks/mongo_3_0.yml @@ -15,7 +15,8 @@ become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - enhanced_networking - mongo_3_0 - munin_node diff --git a/playbooks/mongo_3_2.yml b/playbooks/mongo_3_2.yml index 7b87d94003c..c22c43ebae1 100644 --- a/playbooks/mongo_3_2.yml +++ b/playbooks/mongo_3_2.yml @@ -21,7 +21,8 @@ 
become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - mongo_3_2 - munin_node - role: datadog diff --git a/playbooks/mongo_3_4.yml b/playbooks/mongo_3_4.yml new file mode 100644 index 00000000000..139f46931ea --- /dev/null +++ b/playbooks/mongo_3_4.yml @@ -0,0 +1,35 @@ +# Manages a mongo cluster. +# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG +# as used by mongo_replica_set in the mongo_3_4 role. +# +# If you are initializing a cluster, your command might look like: +# ansible-playbook mongo_3_4.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml +# If you just want to deploy an updated replica set config, you can run +# ansible-playbook mongo_3_4.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set +# +# ADDING A NEW CLUSTER MEMBER +# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory +# ansible-playbook mongo_3_4.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo_3_4 + - munin_node + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/mongo_3_6.yml b/playbooks/mongo_3_6.yml new file mode 100644 index 00000000000..5056d2789de --- /dev/null +++ b/playbooks/mongo_3_6.yml @@ -0,0 +1,35 @@ +# Manages a mongo cluster. +# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG +# as used by mongo_replica_set in the mongo_3_6 role. 
+# +# If you are initializing a cluster, your command might look like: +# ansible-playbook mongo_3_6.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml +# If you just want to deploy an updated replica set config, you can run +# ansible-playbook mongo_3_6.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set +# +# ADDING A NEW CLUSTER MEMBER +# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory +# ansible-playbook mongo_3_6.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml +- name: Bootstrap instance(s) + hosts: all + gather_facts: no + become: True + roles: + - python +- name: Deploy MongoDB + hosts: all + become: True + gather_facts: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - mongo_3_6 + - munin_node + - role: datadog + when: COMMON_ENABLE_DATADOG + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: datadog-uninstall + when: not COMMON_ENABLE_DATADOG diff --git a/playbooks/mongo_mms.yml b/playbooks/mongo_mms.yml index 4ced480f0e4..f22c031ebbe 100644 --- a/playbooks/mongo_mms.yml +++ b/playbooks/mongo_mms.yml @@ -6,7 +6,8 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - mongo_mms - role: datadog when: COMMON_ENABLE_DATADOG diff --git a/playbooks/notes.yml b/playbooks/notes.yml index 553989bb501..0f0d0149cb8 100644 --- a/playbooks/notes.yml +++ b/playbooks/notes.yml @@ -6,7 +6,8 @@ ENABLE_DATADOG: False ENABLE_NEWRELIC: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_sites: - edx_notes_api @@ -19,4 +20,7 @@ when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE - role: datadog-uninstall when: not COMMON_ENABLE_DATADOG + - role: hermes + HERMES_TARGET_SERVICE: 'edx_notes_api' + when: 
EDX_NOTES_API_HERMES_ENABLED diff --git a/playbooks/notifier.yml b/playbooks/notifier.yml index b454bd839fb..ba2e9fb497b 100644 --- a/playbooks/notifier.yml +++ b/playbooks/notifier.yml @@ -6,7 +6,8 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - notifier - role: splunkforwarder when: COMMON_ENABLE_SPLUNKFORWARDER diff --git a/playbooks/oauth_client_setup.yml b/playbooks/oauth_client_setup.yml index 66770e0db06..335199f794f 100644 --- a/playbooks/oauth_client_setup.yml +++ b/playbooks/oauth_client_setup.yml @@ -9,7 +9,9 @@ - "roles/ecommerce/defaults/main.yml" - "roles/credentials/defaults/main.yml" - "roles/discovery/defaults/main.yml" - - "roles/journals/defaults/main.yml" - "roles/veda_web_frontend/defaults/main.yml" + - "roles/registrar/defaults/main.yml" + - "roles/designer/defaults/main.yml" + - "roles/enterprise_catalog/defaults/main.yml" roles: - oauth_client_setup diff --git a/playbooks/edx_sandbox.yml b/playbooks/openedx_native.yml similarity index 63% rename from playbooks/edx_sandbox.yml rename to playbooks/openedx_native.yml index 2e2b468aa11..e6ce209b2e7 100644 --- a/playbooks/edx_sandbox.yml +++ b/playbooks/openedx_native.yml @@ -1,8 +1,6 @@ --- -# Example sandbox configuration -# for single server community -# installs +# Open edX Native installation for single server community installs. - name: Bootstrap instance(s) hosts: all @@ -17,24 +15,38 @@ gather_facts: True vars: migrate_db: "yes" + EDXAPP_PREVIEW_LMS_BASE: '{{ EDXAPP_LMS_BASE }}' + EDXAPP_LOGIN_REDIRECT_WHITELIST: [ "{{ EDXAPP_CMS_BASE }}" ] + EDXAPP_LMS_BASE_SCHEME: http EDXAPP_LMS_NGINX_PORT: '80' - edx_platform_version: 'master' + EDX_PLATFORM_VERSION: 'master' + edx_platform_version: "{{ EDX_PLATFORM_VERSION }}" # Set to false if deployed behind another proxy/load balancer. 
NGINX_SET_X_FORWARDED_HEADERS: True DISCOVERY_URL_ROOT: 'http://localhost:{{ DISCOVERY_NGINX_PORT }}' + AWS_GATHER_FACTS: false + COMMON_ENABLE_AWS_ROLE: false ecommerce_create_demo_data: true credentials_create_demo_data: true + CONFIGURE_JWTS: true + SANDBOX_ENABLE_BLOCKSTORE: false SANDBOX_ENABLE_DISCOVERY: true SANDBOX_ENABLE_ECOMMERCE: true SANDBOX_ENABLE_ANALYTICS_API: true SANDBOX_ENABLE_INSIGHTS: true SANDBOX_ENABLE_RABBITMQ: true - JOURNALS_ENABLED: false + SANDBOX_ENABLE_NOTES: false + DEMO_ROLE_ENABLED: true + ECOMMERCE_ENABLE_COMPREHENSIVE_THEMING: false + EDXAPP_ENABLE_MEMCACHE: true + EDXAPP_ENABLE_ELASTIC_SEARCH: true roles: - role: swapfile SWAPFILE_SIZE: 4GB - role: scorm when: "{{ EDXAPP_XBLOCK_SETTINGS }}.get('ScormXBlock', False)" + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_sites: - certs @@ -48,8 +60,8 @@ - role: edxlocal when: EDXAPP_MYSQL_HOST == 'localhost' - role: memcache - when: "'localhost' in ' '.join(EDXAPP_MEMCACHE)" - - role: mongo_3_2 + when: EDXAPP_ENABLE_MEMCACHE + - role: mongo_3_6 when: "'localhost' in EDXAPP_MONGO_HOSTS" - role: rabbitmq rabbitmq_ip: 127.0.0.1 @@ -57,6 +69,8 @@ - role: edxapp celery_worker: True - edxapp + - role: blockstore + when: SANDBOX_ENABLE_BLOCKSTORE - role: ecommerce when: SANDBOX_ENABLE_ECOMMERCE - role: ecomworker @@ -66,19 +80,27 @@ when: SANDBOX_ENABLE_ANALYTICS_API - role: insights when: SANDBOX_ENABLE_INSIGHTS - - edx_notes_api + - role: edx_notes_api + when: SANDBOX_ENABLE_NOTES - role: insights when: SANDBOX_ENABLE_INSIGHTS - - demo + - role: demo + when: DEMO_ROLE_ENABLED + - role: edx_notes_api + # Run the nginx role to install edx_notes_api config since the app role + # currently doesn't do that. 
+ - role: nginx + nginx_sites: + - edx_notes_api + when: SANDBOX_ENABLE_NOTES + - role: demo - oauth_client_setup - oraclejdk - role: elasticsearch - when: "'localhost' in EDXAPP_ELASTIC_SEARCH_CONFIG|map(attribute='host')" + when: EDXAPP_ENABLE_ELASTIC_SEARCH - forum - role: discovery when: SANDBOX_ENABLE_DISCOVERY - - role: journals - when: JOURNALS_ENABLED - role: notifier NOTIFIER_DIGEST_TASK_INTERVAL: 5 - role: xqueue @@ -95,3 +117,5 @@ when: POSTFIX_QUEUE_EXTERNAL_SMTP_HOST != '' - role: datadog-uninstall when: not COMMON_ENABLE_DATADOG + - role: user_retirement_pipeline + when: COMMON_RETIREMENT_SERVICE_SETUP diff --git a/playbooks/ora2.yml b/playbooks/ora2.yml index 9c7fcb3b00c..0b5d5378142 100644 --- a/playbooks/ora2.yml +++ b/playbooks/ora2.yml @@ -8,6 +8,7 @@ become: True gather_facts: True vars: + - env_path: /edx/app/edxapp/edxapp_env - edxapp_venv_dir: "/edx/app/edxapp/venvs/edxapp" - edxapp_code_dir: "/edx/app/edxapp/edx-platform" - edxapp_deploy_path: "{{ edxapp_venv_dir }}/bin:{{ edxapp_code_dir }}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" @@ -31,7 +32,7 @@ - name: migrate shell: > - {{ edxapp_venv_dir }}/bin/python manage.py lms migrate --settings=aws --noinput + . 
{{env_path}} && {{ edxapp_venv_dir }}/bin/python manage.py lms migrate --settings=production --noinput chdir={{ edxapp_code_dir }} environment: DB_MIGRATION_USER: "{{ edxapp_mysql_user }}" diff --git a/playbooks/populate_configuration_model.yml b/playbooks/populate_configuration_model.yml index 9a67c3ae54c..c5a76da9d1f 100644 --- a/playbooks/populate_configuration_model.yml +++ b/playbooks/populate_configuration_model.yml @@ -36,6 +36,7 @@ vars: python_path: /edx/bin/python.edxapp manage_path: /edx/bin/manage.edxapp + lms_env: /edx/app/edxapp/edxapp_env become_user: www-data become: true tasks: @@ -48,7 +49,7 @@ dest: "{{ xblock_config_temp_directory.stdout }}/{{ file | basename }}" register: xblock_config_file - name: Manage xblock configurations - shell: "{{ python_path }} {{ manage_path }} lms --settings=aws populate_model -f {{ xblock_config_file.dest | quote }} -u {{ user }}" + shell: ". {{lms_env}} && {{ python_path }} {{ manage_path }} lms --settings=production populate_model -f {{ xblock_config_file.dest | quote }} -u {{ user }}" register: command_result changed_when: "'Import complete, 0 new entries created' not in command_result.stdout" - debug: msg="{{ command_result.stdout }}" diff --git a/playbooks/program_manager.yml b/playbooks/program_manager.yml new file mode 100644 index 00000000000..8be9db28785 --- /dev/null +++ b/playbooks/program_manager.yml @@ -0,0 +1,21 @@ +- name: Deploy Program-manager Frontend + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'program-manager' + PROGRAM_MANAGER_ENABLED: True + PROGRAM_MANAGER_SANDBOX_BUILD: False + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_sites: + - program_manager + PROGRAM_MANAGER_NGINX_PORT: 8976 + - program_manager + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/prospectus.yml b/playbooks/prospectus.yml 
new file mode 100644 index 00000000000..8e90fdf5e7c --- /dev/null +++ b/playbooks/prospectus.yml @@ -0,0 +1,29 @@ +- name: Deploy edX Prospectus Service + hosts: all + become: True + gather_facts: True + vars: + ENABLE_DATADOG: False + ENABLE_NEWRELIC: False + CLUSTER_NAME: 'prospectus' + PROSPECTUS_DATA_DIR: "/edx/var/prospectus" + NGINX_OVERRIDE_DEFAULT_MAP_HASH_SIZE: True + NGINX_MAP_HASH_MAX_SIZE: 4096 + NGINX_MAP_HASH_BUCKET_SIZE: 128 + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: automated + AUTOMATED_USERS: "{{ PROSPECTUS_AUTOMATED_USERS | default({}) }}" + - role: prospectus + - role: nginx + nginx_app_dir: "/etc/nginx" + nginx_sites: + - prospectus + nginx_default_sites: + - prospectus + PROSPECTUS_NGINX_PORT: 8000 + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE diff --git a/playbooks/rabbitmq.yml b/playbooks/rabbitmq.yml index 2c4e1ec57ac..0e166a9bf00 100644 --- a/playbooks/rabbitmq.yml +++ b/playbooks/rabbitmq.yml @@ -15,7 +15,7 @@ CLUSTER_NAME: 'rabbitmq' serial: "{{ serial_count }}" pre_tasks: - - action: ec2_facts + - action: ec2_metadata_facts when: elb_pre_post - debug: var: ansible_ec2_instance_id @@ -30,7 +30,8 @@ become: False when: elb_pre_post roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: automated AUTOMATED_USERS: "{{ RABBIT_AUTOMATED_USERS | default({}) }}" tags: diff --git a/playbooks/redirector.yml b/playbooks/redirector.yml index 03c8d7f1ec5..0b2128f9118 100644 --- a/playbooks/redirector.yml +++ b/playbooks/redirector.yml @@ -7,7 +7,8 @@ CLUSTER_NAME: 'redirector' serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_redirects: "{{ NGINX_REDIRECTOR_CUSTOM_REDIRECTS }}" REDIRECT_NGINX_PORT: "80" diff --git a/playbooks/redis.yml b/playbooks/redis.yml index b9300f21063..7565d85e285 100644 --- a/playbooks/redis.yml +++ b/playbooks/redis.yml @@ -3,7 +3,8 @@ 
become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - redis - role: datadog when: COMMON_ENABLE_DATADOG diff --git a/playbooks/registrar.yml b/playbooks/registrar.yml new file mode 100644 index 00000000000..018baceae81 --- /dev/null +++ b/playbooks/registrar.yml @@ -0,0 +1,22 @@ +- name: Deploy edX Registrar + hosts: all + become: True + gather_facts: True + vars: + ENABLE_NEWRELIC: True + CLUSTER_NAME: 'registrar' + REGISTRAR_ENABLED: True + roles: + - role: aws + when: COMMON_ENABLE_AWS_ROLE + - role: nginx + nginx_default_sites: + - registrar + - registrar + - role: splunkforwarder + when: COMMON_ENABLE_SPLUNKFORWARDER + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE + - role: hermes + HERMES_TARGET_SERVICE: 'registrar' + when: REGISTRAR_HERMES_ENABLED diff --git a/playbooks/restart_supervisor.yml b/playbooks/restart_supervisor.yml index ec26bfefe62..6724da9f9e6 100644 --- a/playbooks/restart_supervisor.yml +++ b/playbooks/restart_supervisor.yml @@ -11,5 +11,5 @@ name: "{{ supervisor_service }}" state: restarted register: rc - until: rc|success + until: rc is succeeded retries: 5 diff --git a/playbooks/retire_host.yml b/playbooks/retire_host.yml index 8c46408c3d6..32fddd184f1 100644 --- a/playbooks/retire_host.yml +++ b/playbooks/retire_host.yml @@ -8,6 +8,7 @@ # This is separate because it's use of handlers # leads to various race conditions. 
# + - name: Stop all services hosts: "{{TARGET}}" become: True @@ -45,3 +46,4 @@ tasks: - name: Run minos command: /edx/app/minos/venvs/bin/minos --config /edx/etc/minos.yml --json + ignore_errors: yes diff --git a/playbooks/roles/ad_hoc_reporting/defaults/main.yml b/playbooks/roles/ad_hoc_reporting/defaults/main.yml index 5c4cd5792e8..87704d21821 100644 --- a/playbooks/roles/ad_hoc_reporting/defaults/main.yml +++ b/playbooks/roles/ad_hoc_reporting/defaults/main.yml @@ -44,7 +44,8 @@ ad_hoc_reporting_pip_pkgs: MONGODB_APT_KEY: "7F0CEB10" MONGODB_APT_KEYSERVER: "keyserver.ubuntu.com" MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" -mongo_version: 3.2.12 +MONGO_VERSION: 3.2.12 +mongo_version: "{{ MONGO_VERSION }}" # AD_HOC_REPORTING_REPLICA_DB_HOSTS: # - db_host: "{{ EDXAPP_MYSQL_REPLICA_HOST }}" @@ -52,5 +53,9 @@ mongo_version: 3.2.12 # script_name: edxapp-mysql.sh # #depends on no other vars # depends_on: True - + AD_HOC_REPORTING_REPLICA_DB_HOSTS: [] + +AWS_RDS_IAM_AUTHENTICATION: false + +aws_rds_ca_cert_key_url: "https://s3.amazonaws.com/rds-downloads/rds-combined-ca-bundle.pem" diff --git a/playbooks/roles/ad_hoc_reporting/meta/main.yml b/playbooks/roles/ad_hoc_reporting/meta/main.yml index bd92aabecaa..5a126aa92ab 100644 --- a/playbooks/roles/ad_hoc_reporting/meta/main.yml +++ b/playbooks/roles/ad_hoc_reporting/meta/main.yml @@ -16,4 +16,7 @@ dependencies: user_info: "{{ AD_HOC_REPORTING_USER_INFO }}" tags: - users - + - role: user + user_info: "{{ COMMON_USER_INFO }}" + tags: + - users diff --git a/playbooks/roles/ad_hoc_reporting/tasks/main.yml b/playbooks/roles/ad_hoc_reporting/tasks/main.yml index 450d324e291..2ac55216238 100644 --- a/playbooks/roles/ad_hoc_reporting/tasks/main.yml +++ b/playbooks/roles/ad_hoc_reporting/tasks/main.yml @@ -22,9 +22,8 @@ - name: install system packages apt: - name: "{{ item }}" + name: "{{ ad_hoc_reporting_debian_pkgs }}" state: present - with_items: "{{ ad_hoc_reporting_debian_pkgs 
}}" tags: - install:system-requirements @@ -56,10 +55,9 @@ - name: install python packages pip: - name: "{{ item }}" + name: "{{ ad_hoc_reporting_pip_pkgs }}" state: present extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" - with_items: "{{ ad_hoc_reporting_pip_pkgs }}" tags: - install:app-requirements @@ -76,6 +74,15 @@ - scripts - install:base +- name: Get the AWS rds ca certificate to connect db using SSL + shell: "curl {{ aws_rds_ca_cert_key_url }} -o rds-combined-ca-bundle.pem" + args: + chdir: /edx/bin + when: AWS_RDS_IAM_AUTHENTICATION + tags: + - scripts + - install:base + #These templates rely on there being a global # read_only mysql user, you must override the default # in order for these templates to be written out. @@ -88,7 +95,21 @@ mode: 0755 owner: root group: root - when: COMMON_MYSQL_READ_ONLY_PASS is defined and item.depends_on + when: COMMON_MYSQL_READ_ONLY_PASS is defined and item.depends_on and not AWS_RDS_IAM_AUTHENTICATION + tags: + - scripts + - scripts:mysql + - install:code + with_items: "{{ AD_HOC_REPORTING_REPLICA_DB_HOSTS }}" + +- name: install common rds iam replica scripts + template: + src: edx/bin/rds-iam.sh.j2 + dest: /edx/bin/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ item.script_name }} + mode: 0755 + owner: root + group: root + when: item.db_host|length>0 and AWS_RDS_IAM_AUTHENTICATION tags: - scripts - scripts:mysql @@ -98,7 +119,7 @@ # These templates rely on there being a global # read_only mongo user, you must override the default # in order for these templates to be written out -- name: install mongodb replica scripts +- name: install read_only user mongodb replica scripts template: src: edx/bin/mongo.sh.j2 dest: /edx/bin/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ item.script_name }} @@ -110,11 +131,37 @@ db_name: "{{ EDXAPP_MONGO_DB_NAME }}" db_port: "{{ EDXAPP_MONGO_PORT }}" script_name: edxapp-mongo.sh + read_only_access: "{{ EDXAPP_MONGO_READ_ONLY_ACCESS }}" - db_hosts: "{{ 
FORUM_MONGO_HOSTS_FOR_AD_HOC_REPORTING }}" db_name: "{{ FORUM_MONGO_DATABASE }}" db_port: "{{ FORUM_MONGO_PORT }}" script_name: forum-mongo.sh - when: COMMON_MONGO_READ_ONLY_PASS is defined + read_only_access: "{{ FORUM_MONGO_READ_ONLY_ACCESS }}" + when: COMMON_MONGO_READ_ONLY_PASS is defined and item.read_only_access + tags: + - scripts + - scripts:mongo + - install:code + +- name: install single user access mongodb replica scripts + template: + src: edx/bin/mongo-user-auth.sh.j2 + dest: /edx/bin/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ item.script_name }} + mode: 0755 + owner: root + group: root + with_items: + - db_hosts: "{{ EDXAPP_MONGO_HOSTS }}" + db_name: "{{ EDXAPP_MONGO_DB_NAME }}" + db_port: "{{ EDXAPP_MONGO_PORT }}" + script_name: edxapp-mongo-user-auth.sh + read_only_access: "{{ EDXAPP_MONGO_READ_ONLY_ACCESS }}" + - db_hosts: "{{ FORUM_MONGO_HOSTS_FOR_AD_HOC_REPORTING }}" + db_name: "{{ FORUM_MONGO_DATABASE }}" + db_port: "{{ FORUM_MONGO_PORT }}" + script_name: forum-mongo-user-auth.sh + read_only_access: "{{ FORUM_MONGO_READ_ONLY_ACCESS }}" + when: not item.read_only_access tags: - scripts - scripts:mongo diff --git a/playbooks/roles/ad_hoc_reporting/templates/edx/bin/mongo-user-auth.sh.j2 b/playbooks/roles/ad_hoc_reporting/templates/edx/bin/mongo-user-auth.sh.j2 new file mode 100644 index 00000000000..4f7850a00d3 --- /dev/null +++ b/playbooks/roles/ad_hoc_reporting/templates/edx/bin/mongo-user-auth.sh.j2 @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +db_hosts={{ item.db_hosts }} + +username=$1 +echo "Input your password: " +read -s password + +if [[ -z $username ]]; then + echo "Username required! 
Rerun using the following format to connect /edx/bin/this-script.sh USERNAME" + exit 1 +fi + +for host in ${db_hosts//,/ }; do + is_secondary=$(mongo ${host}:{{ item.db_port }}/{{ item.db_name }} -u ${username} -p"${password}" --quiet --eval "printjson(db.isMaster()['secondary'])") + is_hidden=$(mongo ${host}:{{ item.db_port }}/{{ item.db_name }} -u ${username} -p"${password}" --quiet --eval "printjson(db.isMaster()['hidden'])") + if [[ $is_hidden == "true" ]]; then + replica=$host + # Found a hidden secondary no need to keep looking. + break + fi + + if [[ $is_secondary == "true" ]]; then + replica=$host + # Found a secondary but there could be a hidden secondary. + # keep looking. +fi +done + +if [[ -z $replica ]]; then + echo "No replica found for $db_hosts!" + exit 1 +fi + +mongo ${replica}:{{ item.db_port }}/{{ item.db_name }} -u ${username} -p"${password}" diff --git a/playbooks/roles/ad_hoc_reporting/templates/edx/bin/rds-iam.sh.j2 b/playbooks/roles/ad_hoc_reporting/templates/edx/bin/rds-iam.sh.j2 new file mode 100644 index 00000000000..faab7a1fe1e --- /dev/null +++ b/playbooks/roles/ad_hoc_reporting/templates/edx/bin/rds-iam.sh.j2 @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +unset AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SECURITY_TOKEN AWS_SESSION_TOKEN + +{% if COMMON_DEPLOYMENT == 'edge' %} +ROLE_ARN={{ RDS_IAM_AUTHENTICATION_ROLE_ARN }} +RESULT=(`aws sts assume-role --role-arn $ROLE_ARN \ + --role-session-name rds_read_only_iam \ + --query '[Credentials.AccessKeyId,Credentials.SecretAccessKey,Credentials.SessionToken]' \ + --output text`) +export AWS_ACCESS_KEY_ID=${RESULT[0]} +export AWS_SECRET_ACCESS_KEY=${RESULT[1]} +export AWS_SECURITY_TOKEN=${RESULT[2]} +export AWS_SESSION_TOKEN=${AWS_SECURITY_TOKEN} +{% endif %} + +logger "Generating token for {{ item.db_host }}" + +TOKEN="$(aws rds generate-db-auth-token --hostname {{ item.db_host }} --port 3306 --region us-east-1 --username {{ COMMON_MYSQL_READ_ONLY_USER }} )" + +if [[ "$1" == 
"generate-db-auth-token" ]]; then + echo "Third party application access credentials" + echo "----" + echo "Hostname: {{ item.db_host }}" + echo "Port: 3306" + echo "Username: read_only_iam" + echo "Password: $TOKEN" + echo " " + echo "see https://openedx.atlassian.net/wiki/spaces/EdxOps/pages/26182437/How+to+Access+a+Read+Replica for documentation" +elif [[ -z "$1" ]]; then + mysql -u {{ COMMON_MYSQL_READ_ONLY_USER }} -h {{ item.db_host }} --enable-cleartext-plugin --ssl-ca=/edx/bin/rds-combined-ca-bundle.pem --password=$TOKEN {{ item.db_name }} +else + echo "USAGE:" + echo " Generates an auth token:" + echo " /edx/bin/db_iam_auth_script generate-db-auth-token" + echo " Connect to a db with IAM auth token:" + echo " /edx/bin/db_iam_auth_script" +fi diff --git a/playbooks/roles/alton/defaults/main.yml b/playbooks/roles/alton/defaults/main.yml deleted file mode 100644 index 1cc798fdc2a..00000000000 --- a/playbooks/roles/alton/defaults/main.yml +++ /dev/null @@ -1,97 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS -# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -## -# Defaults for role alton -# - -# -# vars are namespace with the module name. -# - - -ALTON_USERNAME: '1234_1234@chat.hipchat.com' -ALTON_PASSWORD: 'password' -ALTON_V1_TOKEN: 'HIPCHAT_V1_TOKEN' -ALTON_V2_TOKEN: 'HIPCHAT_V2_TOKEN' -ALTON_ROOMS: 'Hammer' -ALTON_NAME: 'Alton W. 
Daemon' -ALTON_HANDLE: 'alton' -ALTON_REDIS_URL: 'redis://fakeuser:redispassword@localhost:6379' -ALTON_HTTPSERVER_PORT: '8081' -ALTON_WORLD_WEATHER_KEY: !!null -ALTON_AWS_CREDENTIALS: !!null -ALTON_TWOFACTOR_PRINCIPLE: "email@domain.org" -ALTON_TWOFACTOR_ISSUER: "Alton" -ALTON_TWOFACTOR_S3_BUCKET: "alton-twofactor" -ALTON_TWOFACTOR_S3_PROFILE: "aws" -ALTON_ADMIN_USERS: - - "some" - - "hipchat" - - "usernames" -ALTON_TWOFACTOR_SECRET: "CHANGEME" -ALTON_NOTIFY_CALLBACK_URL: "http://alton.hostname:{{ALTON_HTTPSERVER_PORT}}/notify/" - - -# Needed if you want to build AMIs from alton. -ALTON_JENKINS_URL: !!null -ALTON_JENKINS_API_USER: !!null -ALTON_JENKINS_API_KEY: !!null - -alton_role_name: alton - -alton_user: alton -alton_app_dir: "{{ COMMON_APP_DIR }}/alton" -alton_code_dir: "{{ alton_app_dir }}/alton" -alton_venvs_dir: "{{ alton_app_dir }}/venvs" -alton_venv_dir: "{{ alton_venvs_dir }}/alton" -alton_venv_bin: "{{ alton_venv_dir }}/bin" - -alton_source_repo: "https://github.com/edx/alton.git" -alton_version: "master" -alton_requirements_file: "{{ alton_code_dir }}/requirements.txt" - -alton_supervisor_wrapper: "{{ alton_app_dir }}/alton-supervisor.sh" - -alton_environment: - WILL_USERNAME: "{{ ALTON_USERNAME }}" - WILL_PASSWORD: "{{ ALTON_PASSWORD }}" - WILL_V1_TOKEN: "{{ ALTON_V1_TOKEN }}" - WILL_V2_TOKEN: "{{ ALTON_V2_TOKEN }}" - WILL_ROOMS: "{{ ALTON_ROOMS }}" - WILL_NAME: "{{ ALTON_NAME }}" - WILL_HANDLE: "{{ ALTON_HANDLE }}" - WILL_REDIS_URL: "{{ ALTON_REDIS_URL }}" - WILL_HTTPSERVER_PORT: "{{ ALTON_HTTPSERVER_PORT }}" - WILL_WORLD_WEATHER_ONLINE_KEY: "{{ ALTON_WORLD_WEATHER_KEY }}" - WILL_JENKINS_URL: "{{ ALTON_JENKINS_URL }}" - WILL_JENKINS_API_USER: "{{ ALTON_JENKINS_API_USER }}" - WILL_JENKINS_API_KEY: "{{ ALTON_JENKINS_API_KEY }}" - WILL_BOTO_CONFIG: "{{ alton_app_dir }}/.boto" - BOTO_CONFIG: "{{ alton_app_dir }}/.boto" - WILL_TWOFACTOR_PRINCIPLE: "{{ ALTON_TWOFACTOR_PRINCIPLE }}" - WILL_TWOFACTOR_ISSUER: "{{ ALTON_TWOFACTOR_ISSUER }}" - 
WILL_TWOFACTOR_S3_BUCKET: "{{ ALTON_TWOFACTOR_S3_BUCKET }}" - WILL_TWOFACTOR_S3_PROFILE: "{{ ALTON_TWOFACTOR_S3_PROFILE }}" - WILL_ADMIN_USERS: "{{','.join(ALTON_ADMIN_USERS) }}" - WILL_TWOFACTOR_SECRET: "{{ ALTON_TWOFACTOR_SECRET }}" - WILL_NOTIFY_CALLBACK_URL: "{{ ALTON_NOTIFY_CALLBACK_URL }}" - WILL_PIPELINE_BUCKET_NAME: "{{ ALTON_PIPELINE_BUCKET_NAME }}" - WILL_GOCD_USERNAME: "{{ ALTON_GOCD_USERNAME }}" - WILL_GOCD_PASSWORD: "{{ ALTON_GOCD_PASSWORD }}" - WILL_GOCD_SERVER_URL: "{{ ALTON_GOCD_SERVER_URL }}" - -# -# OS packages -# - -alton_debian_pkgs: [] - -alton_redhat_pkgs: [] - diff --git a/playbooks/roles/alton/handlers/main.yml b/playbooks/roles/alton/handlers/main.yml deleted file mode 100644 index 4b59bf234bd..00000000000 --- a/playbooks/roles/alton/handlers/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS -# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Handlers for role alton -# -# Overview: -# -# -- name: restart alton - supervisorctl: - name: alton - supervisorctl_path: "{{ supervisor_ctl }}" - config: "{{ supervisor_cfg }}" - state: restarted - when: not disable_edx_services diff --git a/playbooks/roles/alton/tasks/deploy.yml b/playbooks/roles/alton/tasks/deploy.yml deleted file mode 100644 index e07b922308a..00000000000 --- a/playbooks/roles/alton/tasks/deploy.yml +++ /dev/null @@ -1,73 +0,0 @@ ---- -- name: Setup the env and configure the boto profiles for alton - template: - src: "{{ item.src }}" - dest: "{{ alton_app_dir }}/{{ item.dest }}" - owner: "{{ alton_user }}" - group: "{{ common_web_user }}" - mode: "0640" - with_items: - - { src: 'alton_env.j2', dest: 'alton_env' } - - { src: 'boto.j2', dest: '.boto' } - notify: restart alton - -- name: Checkout the code - git: - dest: "{{ 
alton_code_dir }}" - repo: "{{ alton_source_repo }}" - version: "{{ alton_version }}" - accept_hostkey: yes - become_user: "{{ alton_user }}" - register: alton_checkout - notify: restart alton - -- name: Install the requirements - pip: - requirements: "{{ alton_requirements_file }}" - virtualenv: "{{ alton_venv_dir }}" - state: present - extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" - become_user: "{{ alton_user }}" - notify: restart alton - -- name: Create the supervisor wrapper - template: - src: "{{ alton_supervisor_wrapper | basename }}.j2" - dest: "{{ alton_supervisor_wrapper }}" - mode: "0755" - sudo_user: "{{ alton_user }}" - notify: restart alton - -- name: Create a supervisor config - template: - src: alton.conf.j2 - dest: "{{ supervisor_available_dir }}/alton.conf" - owner: "{{ supervisor_user }}" - group: "{{ supervisor_user }}" - become_user: "{{ supervisor_user }}" - notify: restart alton - -- name: Enable the supervisor config - file: - src: "{{ supervisor_available_dir }}/alton.conf" - dest: "{{ supervisor_cfg_dir }}/alton.conf" - state: link - force: yes - mode: "0644" - become_user: "{{ supervisor_user }}" - when: not disable_edx_services - notify: restart alton - -- name: Update supervisor configuration - shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" - register: supervisor_update - changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != "" - when: not disable_edx_services - -- name: Ensure alton is started - supervisorctl: - name: alton - supervisorctl_path: "{{ supervisor_ctl }}" - config: "{{ supervisor_cfg }}" - state: started - when: not disable_edx_services diff --git a/playbooks/roles/alton/tasks/main.yml b/playbooks/roles/alton/tasks/main.yml deleted file mode 100644 index 6d102cf7b8f..00000000000 --- a/playbooks/roles/alton/tasks/main.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: 
https://openedx.atlassian.net/wiki/display/OpenOPS -# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Tasks for role alton -# -# Overview: -# -# -# Dependencies: -# -# -# Example play: -# -# - -- name: Create application user - user: - name: "{{ alton_user }}" - home: "{{ alton_app_dir }}" - createhome: no - shell: /bin/false - -- name: Create alton user dirs - file: - path: "{{ item }}" - state: directory - owner: "{{ alton_user }}" - group: "{{ common_web_group }}" - with_items: - - "{{ alton_app_dir }}" - - "{{ alton_venvs_dir }}" - -- include: deploy.yml - tags: - - deploy diff --git a/playbooks/roles/alton/templates/alton-supervisor.sh.j2 b/playbooks/roles/alton/templates/alton-supervisor.sh.j2 deleted file mode 100644 index 4f30a1d1dd4..00000000000 --- a/playbooks/roles/alton/templates/alton-supervisor.sh.j2 +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -source {{ alton_app_dir }}/alton_env -cd {{ alton_code_dir }} - -{{ alton_venv_bin }}/python run_alton.py diff --git a/playbooks/roles/alton/templates/alton.conf.j2 b/playbooks/roles/alton/templates/alton.conf.j2 deleted file mode 100644 index 69816e6c48f..00000000000 --- a/playbooks/roles/alton/templates/alton.conf.j2 +++ /dev/null @@ -1,10 +0,0 @@ -[program:alton] - -command={{ alton_supervisor_wrapper }} -priority=999 -user={{ common_web_user }} -stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log -killasgroup=true -stopasgroup=true -stopsignal=QUIT diff --git a/playbooks/roles/alton/templates/alton_env.j2 b/playbooks/roles/alton/templates/alton_env.j2 deleted file mode 100644 index ba2a561f12d..00000000000 --- a/playbooks/roles/alton/templates/alton_env.j2 +++ /dev/null @@ -1,10 +0,0 @@ -# {{ ansible_managed }} - -{% for name,value in alton_environment.items() -%} -{%- if value -%} -export {{ name 
}}="{{ value }}" -{% endif %} -{%- endfor %} - -export WILL_BOTO_PROFILES="{{ ALTON_AWS_CREDENTIALS|join(';') }}" - diff --git a/playbooks/roles/alton/templates/boto.j2 b/playbooks/roles/alton/templates/boto.j2 deleted file mode 100644 index 28f21ce1b07..00000000000 --- a/playbooks/roles/alton/templates/boto.j2 +++ /dev/null @@ -1,6 +0,0 @@ -{% for deployment, creds in ALTON_AWS_CREDENTIALS.iteritems() %} -[profile {{ deployment }}] -aws_access_key_id = {{ creds.access_id }} -aws_secret_access_key = {{ creds.secret_key }} - -{% endfor %} diff --git a/playbooks/roles/analytics_api/defaults/main.yml b/playbooks/roles/analytics_api/defaults/main.yml index 83a36cdf772..6902890b973 100644 --- a/playbooks/roles/analytics_api/defaults/main.yml +++ b/playbooks/roles/analytics_api/defaults/main.yml @@ -142,6 +142,12 @@ ANALYTICS_API_REPORT_DOWNLOAD_BACKEND: ANALYTICS_API_CSRF_COOKIE_SECURE: false +ANALYTICS_API_ALLOW_CORS_HEADERS: true +ANALYTICS_API_ALLOW_CORS_CREDENTIALS: true +ANALYTICS_API_CORS_ORIGIN_WHITELIST: [] +ANALYTICS_API_BASIC_AUTH_EXEMPTED_PATHS: + - 'enterprise' + analytics_api_service_config_overrides: API_AUTH_TOKEN: '{{ ANALYTICS_API_AUTH_TOKEN }}' STATICFILES_DIRS: ['static'] @@ -169,3 +175,11 @@ analytics_api_service_config_overrides: # Default dummy user, override this!! 
ANALYTICS_API_USERS: "dummy-api-user": "changeme" + +ANALYTICS_API_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +# Remote config +ANALYTICS_API_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +ANALYTICS_API_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +ANALYTICS_API_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" diff --git a/playbooks/roles/analytics_api/meta/main.yml b/playbooks/roles/analytics_api/meta/main.yml index e9a12c4fc65..701d1ad396c 100644 --- a/playbooks/roles/analytics_api/meta/main.yml +++ b/playbooks/roles/analytics_api/meta/main.yml @@ -27,6 +27,7 @@ dependencies: edx_django_service_home: '{{ COMMON_APP_DIR }}/{{ analytics_api_service_name }}' edx_django_service_config_overrides: '{{ analytics_api_service_config_overrides }}' edx_django_service_nginx_port: '{{ ANALYTICS_API_NGINX_PORT }}' + edx_django_service_nginx_read_timeout: 300 edx_django_service_ssl_nginx_port: '{{ ANALYTICS_API_SSL_NGINX_PORT }}' edx_django_service_default_db_host: '{{ ANALYTICS_API_DEFAULT_HOST }}' edx_django_service_default_db_name: '{{ ANALYTICS_API_DEFAULT_DB_NAME }}' @@ -38,7 +39,13 @@ dependencies: edx_django_service_django_settings_module: '{{ ANALYTICS_API_DJANGO_SETTINGS_MODULE }}' edx_django_service_environment_extra: '{{ analytics_api_environment }}' edx_django_service_secret_key: '{{ ANALYTICS_API_SECRET_KEY }}' - edx_django_service_use_python3: false edx_django_service_wsgi_name: '{{ analytics_api_wsgi_name }}' edx_django_service_hostname: '~^((stage|prod)-)?{{ analytics_api_hostname }}.*' edx_django_service_newrelic_appname: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ analytics_api_newrelic_appname }}' + edx_django_service_cors_whitelist: '{{ ANALYTICS_API_CORS_ORIGIN_WHITELIST }}' + edx_django_service_allow_cors_headers: '{{ ANALYTICS_API_ALLOW_CORS_HEADERS }}' + edx_django_service_allow_cors_credentials: '{{ ANALYTICS_API_ALLOW_CORS_CREDENTIALS }}' + edx_django_service_basic_auth_exempted_paths_extra: '{{ 
ANALYTICS_API_BASIC_AUTH_EXEMPTED_PATHS }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ ANALYTICS_API_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_decrypt_config_enabled: '{{ ANALYTICS_API_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ ANALYTICS_API_COPY_CONFIG_ENABLED }}' diff --git a/playbooks/roles/analytics_pipeline/defaults/main.yml b/playbooks/roles/analytics_pipeline/defaults/main.yml index 4f4f3814aef..61cbc7db0fe 100644 --- a/playbooks/roles/analytics_pipeline/defaults/main.yml +++ b/playbooks/roles/analytics_pipeline/defaults/main.yml @@ -29,11 +29,10 @@ ANALYTICS_PIPELINE_INPUT_DATABASE_HOST: localhost ANALYTICS_PIPELINE_INPUT_DATABASE_PORT: 3306 ANALYTICS_PIPELINE_INPUT_DATABASE_OPTIONS: {} ANALYTICS_PIPELINE_INPUT_DATABASE: - username: "{{ COMMON_MYSQL_READ_ONLY_USER }}" - password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}" - host: "{{ ANALYTICS_PIPELINE_INPUT_DATABASE_HOST }}" - port: "{{ ANALYTICS_PIPELINE_INPUT_DATABASE_PORT }}" - options: "{{ ANALYTICS_PIPELINE_INPUT_DATABASE_OPTIONS }}" + username: "{{ COMMON_ANALYTICS_MYSQL_READ_ONLY_USER }}" + password: "{{ COMMON_ANALYTICS_MYSQL_READ_ONLY_PASS }}" + host: localhost + port: 3306 ANALYTICS_PIPELINE_CONFIG_DIR: "{{ COMMON_CFG_DIR }}/edx-analytics-pipeline" ANALYTICS_PIPELINE_HDFS_DATA_DIR: "hdfs://localhost:9000/data" diff --git a/playbooks/roles/analytics_pipeline/tasks/main.yml b/playbooks/roles/analytics_pipeline/tasks/main.yml index ac1882fa05c..47469456b6b 100644 --- a/playbooks/roles/analytics_pipeline/tasks/main.yml +++ b/playbooks/roles/analytics_pipeline/tasks/main.yml @@ -135,35 +135,31 @@ - name: enable Hadoop services service: - name: "{{ item }}" + name: "{{ hadoop_common_services }}" enabled: yes - with_items: "{{ hadoop_common_services }}" tags: - install - install:configuration - name: start Hadoop services service: - name: "{{ item }}" + name: "{{ hadoop_common_services }}" state: started - with_items: "{{ 
hadoop_common_services }}" tags: - manage - manage:start - name: stop Hadoop services service: - name: "{{ item }}" + name: "{{ hadoop_common_services }}" state: stopped - with_items: "{{ hadoop_common_services }}" tags: - manage:stop - name: restart Hadoop services service: - name: "{{ item }}" + name: "{{ hadoop_common_services }}" state: restarted - with_items: "{{ hadoop_common_services }}" tags: - manage:start - manage:restart @@ -182,7 +178,7 @@ - install - install:app-requirements register: libcp - until: libcp|success + until: libcp is succeeded retries: 6 delay: 10 diff --git a/playbooks/roles/android_sdk/defaults/main.yml b/playbooks/roles/android_sdk/defaults/main.yml index e5b9d9bda5a..ae719265275 100644 --- a/playbooks/roles/android_sdk/defaults/main.yml +++ b/playbooks/roles/android_sdk/defaults/main.yml @@ -3,27 +3,40 @@ android_user: "android" android_group: "android" # Tarball to download -android_download: "android-sdk_r24.4.1-linux.tgz" + +# https://dl.google.com/android/repository/sdk-tools-linux-4333796.zip +android_download: "sdk-tools-linux-4333796.zip" + # Checksum of Android SDK (from: https://developer.android.com/studio/index.html#downloads) -android_checksum: "725bb360f0f7d04eaccff5a2d57abdd49061326d" +android_checksum: "8c7c28554a32318461802c1291d76fccfafde054" + # path to installed android sdk android_home: "/opt/android-sdk-linux" -# individual android build targets to be downloaded via the android sdk manager -android_build_targets: - - android-23 - - android-21 -# other android dependencies that cannot be tested via the android sdk manager. instead, stat the android_test_path -# to test for presence of the package + +# The SDK version used to compile the project | 6 | Android SDK Platform 28 +android_build_targets: "\"platforms;android-28\"" + +# other android dependencies that cannot be tested via the android sdk manager. 
instead, stat the android_test_path to test for presence of the package +# Plateform Tools | 6 | Android SDK Platform 28 +# The BuildTools | Android SDK Build-Tools 28.0.3 +# Additional components +# extras;google;m2repository | 58 | Google Repository +# extras;android;m2repository| 47.0.0 | Android Support Repository android_tools: - - { package: 'platform-tools', android_test_path: 'platform-tools' } - - { package: 'build-tools-23.0.3', android_test_path: 'build-tools/23.0.3' } - - { package: 'extra-google-m2repository', android_test_path: 'extras/google/m2repository' } - - { package: 'extra-android-m2repository', android_test_path: 'extras/android/m2repository' } - - { package: 'sys-img-armeabi-v7a-android-21', android_test_path: 'system-images/android-21/default/armeabi-v7a/' } -# - { package: 'sys-img-armeabi-v7a-android-23', android_test_path: 'system-images/android-23/default/armeabi-v7a/' } + - { package: "\"platform-tools\"", android_test_path: 'platform-tools' } + - { package: "\"build-tools;28.0.3\"", android_test_path: 'build-tools/28.0.3' } + - { package: "\"extras;google;m2repository\"", android_test_path: 'extras/google/m2repository' } + - { package: "\"extras;android;m2repository\"", android_test_path: 'extras/android/m2repository' } + +# Andriod Virtual Device Image +android_sys_image: "\"system-images;android-28;google_apis;x86\"" + # libraries needed for avd(android virtual device) emulation android_apt_libraries: - - lib32stdc++6 - - lib32z1 -android_sys_image_url: https://s3.amazonaws.com/edx-testeng-tools/android/android-sysimage-23.tar.gz -android_sys_image_checksum: a111ad559000e91e1d8d9d76df83a6341cc8cbfc3608077380ab15f17b5d0033 + - libglu1 + - libpulse0 + +# Lising files to get executable permissions +android_files: + - '/opt/android-sdk-linux/tools/bin/avdmanager' + - '/opt/android-sdk-linux/emulator/emulator' diff --git a/playbooks/roles/android_sdk/tasks/main.yml b/playbooks/roles/android_sdk/tasks/main.yml index 
d3c47cec02b..91bbe288530 100644 --- a/playbooks/roles/android_sdk/tasks/main.yml +++ b/playbooks/roles/android_sdk/tasks/main.yml @@ -13,30 +13,44 @@ # - pre-Ubuntu 16 releases do not contain the android sdk in their apt repos # - the existing ppas containing the sdk are questionable # - ubuntu-make did not seem reliable at the time of writing this -- name: Download the Android SDK tarball +- name: Download the Android SDK get_url: - url: "https://dl.google.com/android/{{ android_download }}" - dest: /tmp/android-sdk.tgz + url: "https://dl.google.com/android/repository/{{ android_download }}" + dest: /tmp/android-sdk.zip - name: Verify checksum of Android SDK - shell: "sha1sum /tmp/android-sdk.tgz" + shell: "sha1sum /tmp/android-sdk.zip" register: sdk_checksum - assert: that: "'{{ android_checksum }}' in sdk_checksum.stdout" -- name: Unarchive tarball to /opt/android-sdk-linux - unarchive: - copy: no - src: /tmp/android-sdk.tgz - dest: /opt - creates: "{{ android_home }}" +- name: Create Android Home directory + file: + path: "{{ android_home }}" + state: directory owner: "{{ android_user }}" group: "{{ android_group }}" - become: yes + mode: 0775 +# Unzip Android SDK and pipe the output to dev/null because unzipping archive creates a large amount of logging which fails the job +- name: Unzip the Android archive + shell: "unzip /tmp/android-sdk.zip -d {{ android_home }} > /dev/null" +- name: Change perms + shell: "chown -R {{ android_user }}:{{ android_group }} {{ android_home }}" + # Use the android sdk manager to install the build targets necessary for the edx mobile app -- name: Install Android API levels - shell: "echo 'y' | {{ android_home }}/tools/android update sdk -a --no-ui --filter {{ android_build_targets | join(',') }}" +- name: Install Android API levels + shell: "echo 'y' | {{ android_home }}/tools/bin/sdkmanager {{ android_build_targets }}" + become: yes + become_user: "{{ android_user }}" + +# Create repository file needed for sdkmanaer +- name: 
Create needed repository file + file: + path: "~/.android/repositories.cfg" + state: touch + become: yes become_user: "{{ android_user }}" + # Put Android package names into a list for easier installation command - name: Gather Android packages to download into a list util_map: @@ -47,16 +61,9 @@ register: android_packages # Use the android sdk manager to install the build tools necessary for the edx mobile app - name: Install other Android tools - shell: "echo 'y' | {{ android_home }}/tools/android update sdk -a --no-ui --filter {{ android_packages.function_output | join(',') }}" + shell: "echo 'y' | {{ android_home }}/tools/bin/sdkmanager {{ android_packages.function_output | join(' ') }} > /dev/null" become: yes become_user: "{{ android_user }}" -# The following libraries are only needed to run AVD emulation, not for compiling -- name: Install additional libraries used for Android emulation - apt: - name: "{{ item }}" - update_cache: yes - state: present - with_items: "{{ android_apt_libraries }}" # Link adb to tools, where all the rest of the android tools are - name: Add symlink for adb file: @@ -65,25 +72,27 @@ state: link owner: "{{ android_user }}" group: "{{ android_group }}" -# TEMPORARY FIX TO https://code.google.com/p/android/issues/detail?id=228113 -# The version of the Android ARM system image used by the mobile team for screenshot -# testing is currently unavailable. In the meantime, download a cached version on -# s3. 
-# Download cached version of Android Sys image, because it is no longer available -# via the Android SDK -- name: Download cached version of Android Sys Image 23 from s3 - shell: "curl -L {{ android_sys_image_url }} -o /var/tmp/android-sysimage-23.tar.gz" - args: - creates: /var/tmp/android-sysimage-23.tar.gz -- name: Verify checksum of downloaded android tarball - shell: "sha256sum /var/tmp/android-sysimage-23.tar.gz" - register: android_sys_image_download_checksum -- assert: - that: - "'{{ android_sys_image_checksum }}' in android_sys_image_download_checksum.stdout" -- name: Unzip Android system image - unarchive: - src: /var/tmp/android-sysimage-23.tar.gz - dest: "{{ android_home }}/system-images" - creates: "{{ android_home }}/system-images/android-23" - copy: no +# The following libraries are only needed to run AVD emulation, not for compiling +- name: Install additional libraries used for Android emulation + apt: + name: "{{ item }}" + update_cache: yes + state: present + with_items: "{{ android_apt_libraries }}" +# Download Android System Image to run emulator +- name: Download Android System Image + shell: "echo 'y' | {{ android_home }}/tools/bin/sdkmanager {{ android_sys_image }} > /dev/null" + become: yes + become_user: "{{ android_user }}" + +- name: Adding executable permissions + file: + path: "{{ item }}" + mode: 0655 + with_items: "{{ android_files }}" + +- name: Install zip utility + apt: + name: "zip" + state: present + update_cache: yes \ No newline at end of file diff --git a/playbooks/roles/ansible-role-django-ida/tasks/main.yml b/playbooks/roles/ansible-role-django-ida/tasks/main.yml index dd117d24b3f..671e6e39175 100644 --- a/playbooks/roles/ansible-role-django-ida/tasks/main.yml +++ b/playbooks/roles/ansible-role-django-ida/tasks/main.yml @@ -10,7 +10,7 @@ - name: Create docker directories file: - path: "../docker/{{ item }}/{{ my_role_name }}" + path: '../docker/{{ item }}/{{ my_role_name|replace("_","-") }}' state: directory with_items: - 
build @@ -48,7 +48,7 @@ - name: Update role templates template: - src: "templates/{{ item.src }}" + src: "templates/templates/{{ item.src }}" dest: "roles/{{ my_role_name }}/templates/{{ item.dest }}" with_items: - { src: 'edx/app/supervisor/conf.d.available/ROLE_NAME.conf.j2', dest: 'edx/app/supervisor/conf.d.available/{{ my_role_name }}.conf.j2'} diff --git a/playbooks/roles/ansible-role-django-ida/templates/defaults/main.yml.j2 b/playbooks/roles/ansible-role-django-ida/templates/defaults/main.yml.j2 index 1f4cc30ac52..1eefaf5c425 100644 --- a/playbooks/roles/ansible-role-django-ida/templates/defaults/main.yml.j2 +++ b/playbooks/roles/ansible-role-django-ida/templates/defaults/main.yml.j2 @@ -2,7 +2,7 @@ {% include 'roles/ansible-role/templates/header.j2' %} # # Defaults for role {{ role_name }} -# +# {{ role_name|upper }}_GIT_IDENTITY: !!null # depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC @@ -47,15 +47,17 @@ {{ role_name|upper }}_URL_ROOT: 'http://{{ role_name }}:18{{ port_suffix }}' {{ role_name|upper }}_LOGOUT_URL: '{{ '{{' }} {{ role_name|upper }}_URL_ROOT }}/logout/' {{ role_name|upper }}_OAUTH_URL_ROOT: '{{ EDXAPP_LMS_ISSUER | default("http://127.0.0.1:8000/oauth2") }}' -{{ role_name|upper }}_OIDC_LOGOUT_URL: '{{ EDXAPP_LMS_ROOT_URL | default("http://127.0.0.1:8000") }}/logout' +{{ role_name|upper }}_OAUTH2_LOGOUT_URL: '{{ EDXAPP_LMS_ROOT_URL | default("http://127.0.0.1:8000") }}/logout' {{ role_name|upper }}_SECRET_KEY: 'Your secret key here' {{ role_name|upper }}_TIME_ZONE: 'UTC' {{ role_name|upper }}_LANGUAGE_CODE: 'en-us' # Used to automatically configure OAuth2 Client -{{ role_name|upper }}_SOCIAL_AUTH_EDX_OIDC_KEY : '{{ role_name|lower }}-key' -{{ role_name|upper }}_SOCIAL_AUTH_EDX_OIDC_SECRET : '{{ role_name|lower }}-secret' +{{ role_name|upper }}_SOCIAL_AUTH_EDX_OAUTH2_KEY : '{{ role_name|lower }}-sso-key' +{{ role_name|upper }}_SOCIAL_AUTH_EDX_OAUTH2_SECRET : '{{ role_name|lower }}-sso-secret' +{{ role_name|upper 
}}_BACKEND_SERVICE_EDX_OAUTH2_KEY : '{{ role_name|lower }}-backend-service-key' +{{ role_name|upper }}_BACKEND_SERVICE_EDX_OAUTH2_SECRET : '{{ role_name|lower }}-backend-service-secret' {{ role_name|upper }}_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false {{ role_name|upper }}_PLATFORM_NAME: 'Your Platform Name Here' @@ -65,12 +67,13 @@ TIME_ZONE: '{{ '{{' }} {{ role_name|upper }}_TIME_ZONE }}' LANGUAGE_CODE: '{{ '{{' }} {{ role_name|upper }}_LANGUAGE_CODE }}' - SOCIAL_AUTH_EDX_OIDC_KEY: '{{ '{{' }} {{ role_name|upper }}_SOCIAL_AUTH_EDX_OIDC_KEY }}' - SOCIAL_AUTH_EDX_OIDC_SECRET: '{{ '{{' }} {{ role_name|upper }}_SOCIAL_AUTH_EDX_OIDC_SECRET }}' - SOCIAL_AUTH_EDX_OIDC_ID_TOKEN_DECRYPTION_KEY: '{{ '{{' }} {{ role_name|upper }}_SOCIAL_AUTH_EDX_OIDC_SECRET }}' - SOCIAL_AUTH_EDX_OIDC_URL_ROOT: '{{ '{{' }} {{ role_name|upper }}_OAUTH_URL_ROOT }}' + SOCIAL_AUTH_EDX_OAUTH2_KEY: '{{ '{{' }} {{ role_name|upper }}_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + SOCIAL_AUTH_EDX_OAUTH2_SECRET: '{{ '{{' }} {{ role_name|upper }}_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + BACKEND_SERVICE_EDX_OAUTH2_KEY: '{{ '{{' }} {{ role_name|upper }}_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + BACKEND_SERVICE_EDX_OAUTH2_SECRET: '{{ '{{' }} {{ role_name|upper }}_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: '{{ '{{' }} {{ role_name|upper }}_OAUTH_URL_ROOT }}' + SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: '{{ '{{' }} {{ role_name|upper }}_OAUTH2_LOGOUT_URL }}' SOCIAL_AUTH_REDIRECT_IS_HTTPS: '{{ '{{' }} {{ role_name|upper }}_SOCIAL_AUTH_REDIRECT_IS_HTTPS }}' - SOCIAL_AUTH_EDX_OIDC_LOGOUT_URL: '{{ '{{' }} {{ role_name|upper }}_OIDC_LOGOUT_URL }}' STATIC_ROOT: "{{ '{{' }} COMMON_DATA_DIR }}/{{ '{{' }} {{ role_name }}_service_name }}/staticfiles" # db config diff --git a/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/Dockerfile.j2 b/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/Dockerfile.j2 index 7c186407005..1b0103a8b40 100644 --- 
a/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/Dockerfile.j2 +++ b/playbooks/roles/ansible-role-django-ida/templates/docker/build/ROLE_NAME/Dockerfile.j2 @@ -25,5 +25,6 @@ RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook {{ role_nam --extra-vars="{{ role_name|upper }}_VERSION=${{ role_name|upper }}_VERSION" \ --extra-vars="COMMON_GIT_PATH=$REPO_OWNER" -USER root -CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"] +USER root +ENTRYPOINT ["/edx/app/edxapp/devstack.sh"] +CMD ["start"] diff --git a/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME.sh.j2 b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME.sh.j2 index 8ede162d8db..0bcbab51303 100644 --- a/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME.sh.j2 +++ b/playbooks/roles/ansible-role-django-ida/templates/templates/edx/app/ROLE_NAME/ROLE_NAME.sh.j2 @@ -15,4 +15,5 @@ export NEW_RELIC_LICENSE_KEY="{{ '{{' }} NEWRELIC_LICENSE_KEY }}" {{ '{%' }} endif -%} source {{ '{{' }} {{ role_name }}_home }}/{{ role_name }}_env -{{ '{{' }} executable }} -c {{ '{{' }} {{ role_name }}_home }}/{{ role_name }}_gunicorn.py {{ '{{' }} {{ role_name|upper }}_GUNICORN_EXTRA }} {{ role_name }}.wsgi:application +# We exec so that gunicorn is the child of supervisor and can be managed properly +exec {{ '{{' }} executable }} -c {{ '{{' }} {{ role_name }}_home }}/{{ role_name }}_gunicorn.py {{ '{{' }} {{ role_name|upper }}_GUNICORN_EXTRA }} {{ role_name }}.wsgi:application diff --git a/playbooks/roles/ansible-role/tasks/main.yml b/playbooks/roles/ansible-role/tasks/main.yml index ca0bd1bf8e7..554e5660324 100644 --- a/playbooks/roles/ansible-role/tasks/main.yml +++ b/playbooks/roles/ansible-role/tasks/main.yml @@ -6,7 +6,7 @@ - name: Prompt for overwrite pause: prompt="Role {{ my_role_name }} exists. 
Overwrite? Touch any key to continue or -c, then a, to abort." - when: role_exists | success + when: role_exists is succeeded - name: Create role directories file: @@ -26,4 +26,4 @@ with_items: - tasks - meta - - defaults \ No newline at end of file + - defaults diff --git a/playbooks/roles/automated/defaults/main.yml b/playbooks/roles/automated/defaults/main.yml index a51454b255d..3d764e347eb 100644 --- a/playbooks/roles/automated/defaults/main.yml +++ b/playbooks/roles/automated/defaults/main.yml @@ -13,7 +13,7 @@ automated_role_name: automated AUTOMATED_USER: "changeme" -automated_sudoers_template: "roles/automated/templates/99-automated.j2" +automated_sudoers_template: "99-automated.j2" # # OS packages diff --git a/playbooks/roles/automated/tasks/main.yml b/playbooks/roles/automated/tasks/main.yml index dcf67bc79dc..6a044bb7aec 100644 --- a/playbooks/roles/automated/tasks/main.yml +++ b/playbooks/roles/automated/tasks/main.yml @@ -26,16 +26,16 @@ # EDXAPP_AUTOMATED_USERS: # ecom: # sudo_commands: -# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms showmigrations --settings=aws" +# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms showmigrations --settings=production" # sudo_user: "edxapp" -# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py cms showmigrations --settings=aws" +# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py cms showmigrations --settings=production" # sudo_user: "edxapp" # authorized_keys: # - 'ssh-rsa ecom+admin@example.com' # - 'ssh-rsa ecom+devel@example.com' # analytics: # sudo_commands: -# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms help --settings=aws" +# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms help --settings=production" # sudo_user: "edxapp" # authorized_keys: 
['ssh-rsa analytics@example.com'] # @@ -89,3 +89,22 @@ owner: "{{ item.key }}" group: "{{ item.key }}" with_dict: "{{ AUTOMATED_USERS }}" + +- name: Are we in a Docker container + shell: echo $(egrep -q 'docker' /proc/self/cgroup && echo 'yes' || echo 'no') + ignore_errors: yes + register: docker_container + +- name: Allow automated users to ssh + lineinfile: + dest: /etc/ssh/sshd_config + regexp: '#automated_users_allowed_to_ssh' + line: "AllowUsers {{ AUTOMATED_USERS.keys() | list | join(' ') }} #automated_users_allowed_to_ssh" + when: ( AUTOMATED_USERS|length > 0 ) and docker_container.stdout != 'yes' + register: users_ssh_access + +- name: restart ssh + service: + name: ssh + state: restarted + when: users_ssh_access.changed diff --git a/playbooks/roles/automated/templates/99-automated.j2 b/playbooks/roles/automated/templates/99-automated.j2 index 03cdbdeabe0..76efbee290b 100644 --- a/playbooks/roles/automated/templates/99-automated.j2 +++ b/playbooks/roles/automated/templates/99-automated.j2 @@ -1,3 +1,3 @@ {% for command in item.value.sudo_commands %} -{{ item.key }} ALL=({{ command.sudo_user }}) SETENV:NOPASSWD:{{ command.command }} +{{ item.key }} ALL=({{ command.sudo_user }}) SETENV:NOPASSWD:{{ command.command | replace('\'', '') }} {% endfor %} diff --git a/playbooks/roles/aws/defaults/main.yml b/playbooks/roles/aws/defaults/main.yml index 556c32c6c20..38781b83f55 100644 --- a/playbooks/roles/aws/defaults/main.yml +++ b/playbooks/roles/aws/defaults/main.yml @@ -33,14 +33,25 @@ aws_s3_sync_script: "{{ vhost_dirs.home.path }}/send-logs-to-s3" aws_s3_logfile: "{{ vhost_dirs.logs.path }}/s3-log-sync.log" aws_region: "us-east-1" # default path to the aws binary -aws_s3cmd: "/usr/local/bin/s3cmd" +aws_s3cmd: "/usr/bin/s3cmd" aws_cmd: "/usr/local/bin/aws" aws_requirements: "{{ vhost_dirs.home.path }}/requirements.txt" + # # OS packages # aws_debian_pkgs: - python-setuptools + - s3cmd aws_redhat_pkgs: [] + +# The AWS_GATHER_FACTS switch is used to enable/disable 
data gathering +# from ec2 instances. +# This is needed in some deployments where S3 is being used for file storage but +# the appserver is in another cloud provider, such as OpenStack. +# These issues started happening after the ec2_facts role was replaced with +# the new version `ec2_metadata_facts` that fails when the server is not +# on AWS, unlike its older counterpart +AWS_GATHER_FACTS: true diff --git a/playbooks/roles/aws/tasks/main.yml b/playbooks/roles/aws/tasks/main.yml index f6202847c94..959579ee9d4 100644 --- a/playbooks/roles/aws/tasks/main.yml +++ b/playbooks/roles/aws/tasks/main.yml @@ -25,8 +25,9 @@ # Start dealing with Jumbo frames issue in mixed MTU deployements in AWS # - name: Gather ec2 facts for use in other roles - action: ec2_facts + action: ec2_metadata_facts no_log: True + when: AWS_GATHER_FACTS tags: - deploy when: cloud_provider == "aws" # Appsembler-specific @@ -49,10 +50,9 @@ - name: Install system packages apt: - name: "{{ item }}" + name: "{{ aws_debian_pkgs }}" state: present update_cache: yes - with_items: "{{ aws_debian_pkgs }}" when: ansible_distribution in common_debian_variants - name: Copy the Python requirements file diff --git a/playbooks/roles/aws/templates/requirements.txt.j2 b/playbooks/roles/aws/templates/requirements.txt.j2 index 146823e6db6..9dd4a9f460a 100644 --- a/playbooks/roles/aws/templates/requirements.txt.j2 +++ b/playbooks/roles/aws/templates/requirements.txt.j2 @@ -4,18 +4,19 @@ # # make upgrade # -awscli==1.11.58 -boto==2.48.0 -botocore==1.5.21 # via awscli, s3transfer -colorama==0.3.7 # via awscli -docutils==0.14 # via awscli, botocore -futures==3.2.0 ; python_version == "2.7" -jmespath==0.9.3 # via botocore -pyasn1==0.4.2 # via rsa -python-dateutil==2.7.3 # via botocore, s3cmd +awscli==1.18.50 # via -r requirements/aws.in +boto3==1.13.0 # via -r requirements/aws.in +boto==2.49.0 # via -r requirements/aws.in +botocore==1.16.0 # via awscli, boto3, s3transfer +colorama==0.4.3 # via awscli +docutils==0.15.2 #
via awscli, botocore +jmespath==0.9.5 # via boto3, botocore +pyasn1==0.4.8 # via rsa +python-dateutil==2.8.1 # via botocore, s3cmd python-magic==0.4.15 # via s3cmd -pyyaml==3.12 # via awscli +pyyaml==3.11 # via -r requirements/aws.in, awscli rsa==3.4.2 # via awscli -s3cmd==1.6.1 -s3transfer==0.1.13 # via awscli -six==1.11.0 # via python-dateutil +s3cmd==2.1.0 # via -r requirements/aws.in +s3transfer==0.3.3 # via awscli, boto3 +six==1.14.0 # via python-dateutil +urllib3==1.25.9 # via botocore diff --git a/playbooks/roles/blockstore/defaults/main.yml b/playbooks/roles/blockstore/defaults/main.yml new file mode 100644 index 00000000000..ebbee2faf47 --- /dev/null +++ b/playbooks/roles/blockstore/defaults/main.yml @@ -0,0 +1,76 @@ +--- +# Role to deploy Blockstore, the next-generation Open edX Learning Object Repository +# +# github: https://github.com/edx/blockstore +# + +blockstore_service_name: 'blockstore' +blockstore_user: '{{ blockstore_service_name }}' +blockstore_home: '{{ COMMON_APP_DIR }}/{{ blockstore_service_name }}' +blockstore_code_dir: '{{ blockstore_home }}/{{ blockstore_service_name }}' +blockstore_venv_dir: '{{ blockstore_home }}/venvs/{{ blockstore_service_name }}' + +BLOCKSTORE_GIT_PATH: 'edx' +BLOCKSTORE_VERSION: 'master' +BLOCKSTORE_GIT_IDENTITY: !!null + +BLOCKSTORE_REPOS: + - PROTOCOL: '{{ COMMON_GIT_PROTOCOL }}' + DOMAIN: '{{ COMMON_GIT_MIRROR }}' + PATH: '{{ BLOCKSTORE_GIT_PATH }}' + REPO: 'blockstore.git' + VERSION: '{{ BLOCKSTORE_VERSION }}' + DESTINATION: '{{ blockstore_code_dir }}' + SSH_KEY: '{{ BLOCKSTORE_GIT_IDENTITY }}' + +blockstore_gunicorn_host: '127.0.0.1' +blockstore_gunicorn_port: '8250' +blockstore_gunicorn_timeout: '300' + +BLOCKSTORE_GUNICORN_WORKERS: '2' +BLOCKSTORE_GUNICORN_EXTRA: '' +BLOCKSTORE_GUNICORN_EXTRA_CONF: '' +BLOCKSTORE_GUNICORN_WORKER_CLASS: 'gevent' +BLOCKSTORE_GUNICORN_MAX_REQUESTS: null + +BLOCKSTORE_NGINX_HOSTNAME: '~^((stage|prod)-)?{{ blockstore_service_name }}.*' +BLOCKSTORE_NGINX_PORT: '1{{ 
blockstore_gunicorn_port }}' +BLOCKSTORE_SSL_NGINX_PORT: '4{{ blockstore_gunicorn_port }}' + +BLOCKSTORE_DEFAULT_DB_NAME: 'blockstore' +BLOCKSTORE_DATABASE_USER: 'blkstr01' +BLOCKSTORE_DATABASE_PASSWORD: 'password' +BLOCKSTORE_DATABASE_HOST: 'localhost' +BLOCKSTORE_DATABASE_PORT: 3306 +BLOCKSTORE_DATABASE_CONN_MAX_AGE: 60 + +BLOCKSTORE_DJANGO_SETTINGS_MODULE: 'blockstore.settings.production' +BLOCKSTORE_SECRET_KEY: !!null + + +# See edx_django_service_automated_users for an example of what this should be +BLOCKSTORE_AUTOMATED_USERS: {} + +# Rather than adding extra wiring for each var under here. +# Just override this whole config dictionary +BLOCKSTORE_SERVICE_CONFIG_OVERRIDES: + BLOCKSTORE_URL_ROOT: 'http://localhost:{{ blockstore_gunicorn_port }}' + +blockstore_environment: + BLOCKSTORE_CFG: '{{ COMMON_CFG_DIR }}/{{ blockstore_service_name }}.yml' + VIRTUAL_ENV: '{{ blockstore_venv_dir }}' + +# +# OS packages +# + +blockstore_debian_pkgs: + - libmysqlclient-dev + - libjpeg-dev + - libssl-dev + - libffi-dev + +blockstore_redhat_pkgs: [] + +BLOCKSTORE_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +BLOCKSTORE_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" diff --git a/playbooks/roles/blockstore/meta/main.yml b/playbooks/roles/blockstore/meta/main.yml new file mode 100644 index 00000000000..7cd1129d1ea --- /dev/null +++ b/playbooks/roles/blockstore/meta/main.yml @@ -0,0 +1,39 @@ +--- +# Role to deploy Blockstore, the next-generation Open edX Learning Object Repository +# +# github: https://github.com/edx/blockstore +# +## +# Role includes for role blockstore +# +dependencies: + - role: edx_django_service + edx_django_service_use_python3: true + edx_django_service_name: '{{ blockstore_service_name }}' + edx_django_service_user: '{{ blockstore_user }}' + edx_django_service_home: '{{ COMMON_APP_DIR }}/{{ blockstore_service_name }}' + edx_django_service_repos: '{{ BLOCKSTORE_REPOS }}' + edx_django_service_version: '{{ BLOCKSTORE_VERSION }}' + 
edx_django_service_gunicorn_port: '{{ blockstore_gunicorn_port }}' + edx_django_service_gunicorn_extra: '{{ BLOCKSTORE_GUNICORN_EXTRA }}' + edx_django_service_gunicorn_worker_class: '{{ BLOCKSTORE_GUNICORN_WORKER_CLASS }}' + edx_django_service_gunicorn_max_requests: '{{ BLOCKSTORE_GUNICORN_MAX_REQUESTS }}' + edx_django_service_hostname: '{{ BLOCKSTORE_NGINX_HOSTNAME }}' + edx_django_service_max_webserver_upload: 10 + edx_django_service_nginx_port: '{{ BLOCKSTORE_NGINX_PORT }}' + edx_django_service_ssl_nginx_port: '{{ BLOCKSTORE_SSL_NGINX_PORT }}' + edx_django_service_default_db_name: '{{ BLOCKSTORE_DEFAULT_DB_NAME }}' + edx_django_service_db_user: '{{ BLOCKSTORE_DATABASE_USER }}' + edx_django_service_db_password: '{{ BLOCKSTORE_DATABASE_PASSWORD }}' + edx_django_service_default_db_host: '{{ BLOCKSTORE_DATABASE_HOST }}' + edx_django_service_default_db_atomic_requests: true + edx_django_service_default_db_conn_max_age: '{{ BLOCKSTORE_DATABASE_CONN_MAX_AGE }}' + edx_django_service_django_settings_module: '{{ BLOCKSTORE_DJANGO_SETTINGS_MODULE }}' + edx_django_service_secret_key: '{{ BLOCKSTORE_SECRET_KEY }}' + edx_django_service_automated_users: '{{ BLOCKSTORE_AUTOMATED_USERS }}' + edx_django_service_config_overrides: '{{ BLOCKSTORE_SERVICE_CONFIG_OVERRIDES }}' + edx_django_service_environment_extra: '{{ blockstore_environment }}' + edx_django_service_debian_pkgs_extra: '{{ blockstore_debian_pkgs }}' + edx_django_service_has_static_assets: true + edx_django_service_decrypt_config_enabled: '{{ BLOCKSTORE_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ BLOCKSTORE_COPY_CONFIG_ENABLED }}' diff --git a/playbooks/roles/blockstore/tasks/main.yml b/playbooks/roles/blockstore/tasks/main.yml new file mode 100644 index 00000000000..db8ffcb09db --- /dev/null +++ b/playbooks/roles/blockstore/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# Role to deploy Blockstore, the next-generation Open edX Learning Object Repository +# +# github: 
https://github.com/edx/blockstore +# +# +# Tasks for role blockstore +# +# Overview: This role's tasks come from edx_django_service. +# +# +# Dependencies: +# +# +# Example play: +# +# diff --git a/playbooks/roles/browsers/defaults/main.yml b/playbooks/roles/browsers/defaults/main.yml index d5aca060423..5f030840119 100644 --- a/playbooks/roles/browsers/defaults/main.yml +++ b/playbooks/roles/browsers/defaults/main.yml @@ -20,28 +20,29 @@ browser_deb_pkgs: - xvfb # Firefox for Xenial -firefox_version: version 59.* +FIREFOX_VERSION: version 59.* +firefox_version: "{{ FIREFOX_VERSION }}" # Packages we host in S3 to ensure correct browser version Both Chrome and # FireFox update their apt repos with the latest version, which often causes # spurious acceptance test failures. browser_s3_deb_pkgs: - - name: firefox_59.0.2+build1-0ubuntu0.16.04.1_amd64.deb - url: https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox_59.0.2%2Bbuild1-0ubuntu0.16.04.1_amd64.deb - - name: google-chrome-stable_55.0.2883.87-1_amd64.deb - url: https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_55.0.2883.87-1_amd64.deb + - name: firefox_61.0.1+build1-0ubuntu0.16.04.1_amd64.deb + url: https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox_61.0.1%2Bbuild1-0ubuntu0.16.04.1_amd64.deb + - name: google-chrome-stable_68.0.3440.84-1_amd64.deb + url: https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_68.0.3440.84-1_amd64.deb trusty_browser_s3_deb_pkgs: - name: firefox-mozilla-build_42.0-0ubuntu1_amd64.deb url: https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox-mozilla-build_42.0-0ubuntu1_amd64.deb - - name: google-chrome-stable_59.0.3071.115-1_amd64.deb - url: https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_59.0.3071.115-1_amd64.deb + - name: google-chrome-stable_68.0.3440.84-1_amd64.deb + url: https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_68.0.3440.84-1_amd64.deb # GeckoDriver -geckodriver_url: 
"https://github.com/mozilla/geckodriver/releases/download/v0.20.0/geckodriver-v0.20.0-linux64.tar.gz" +geckodriver_url: "https://github.com/mozilla/geckodriver/releases/download/v0.21.0/geckodriver-v0.21.0-linux64.tar.gz" # ChromeDriver -chromedriver_version: 2.27 +chromedriver_version: 2.41 chromedriver_url: "http://chromedriver.storage.googleapis.com/{{ chromedriver_version }}/chromedriver_linux64.zip" # PhantomJS diff --git a/playbooks/roles/browsers/tasks/main.yml b/playbooks/roles/browsers/tasks/main.yml index 58eec9bd2a8..8c40df638d7 100644 --- a/playbooks/roles/browsers/tasks/main.yml +++ b/playbooks/roles/browsers/tasks/main.yml @@ -126,6 +126,9 @@ src: "../files/geckodriver" dest: "/usr/local/bin/geckodriver" mode: 0755 + tags: + - install + - install:system-requirements - assert: that: diff --git a/playbooks/roles/cassandra/defaults/main.yml b/playbooks/roles/cassandra/defaults/main.yml index 1870fe9161a..8e849b35297 100644 --- a/playbooks/roles/cassandra/defaults/main.yml +++ b/playbooks/roles/cassandra/defaults/main.yml @@ -1,5 +1,6 @@ --- -cassandra_version: "2.0.14" +CASSANDRA_VERSION: "2.0.14" +cassandra_version: "{{ CASSANDRA_VERSION }}" # AFAIK there's no way to detect instance storage after the instaces has started. # Therefore, you MUST know the device names ahead of time. @@ -30,4 +31,4 @@ cassandra_auto_bootstrap: false # For single-node locally-accessible deployments only! 
Otherwise, use: # cassandra_listen_address: "" -cassandra_listen_address: localhost \ No newline at end of file +cassandra_listen_address: localhost diff --git a/playbooks/roles/cassandra/tasks/main.yml b/playbooks/roles/cassandra/tasks/main.yml index 2b474c0051d..40f8894e0c6 100644 --- a/playbooks/roles/cassandra/tasks/main.yml +++ b/playbooks/roles/cassandra/tasks/main.yml @@ -38,7 +38,7 @@ src: "{{ item.0 }}" fstype: ext4 state: mounted - with_together: + with_together: - cassandra_ephemeral_disks - cassandra_data_dirs when: cassandra_ephemeral_disks diff --git a/playbooks/roles/certs/defaults/main.yml b/playbooks/roles/certs/defaults/main.yml index 599c858ae42..255136dacf1 100644 --- a/playbooks/roles/certs/defaults/main.yml +++ b/playbooks/roles/certs/defaults/main.yml @@ -19,7 +19,7 @@ CERTS_XQUEUE_AUTH_USER: "{{ COMMON_HTPASSWD_USER }}" CERTS_XQUEUE_AUTH_PASS: "{{ COMMON_HTPASSWD_PASS }}" # credentials for connecting to the xqueue server CERTS_QUEUE_USER: "lms" -CERTS_QUEUE_PASS: "password" +CERTS_QUEUE_PASS: "{{ COMMON_XQUEUE_LMS_PASSWORD }}" # AWS credentials for certificate upload CERTS_AWS_KEY: "" CERTS_AWS_ID: "" @@ -65,10 +65,10 @@ certs_code_dir: "{{ certs_app_dir }}/certificates" certs_venvs_dir: "{{ certs_app_dir }}/venvs" certs_venv_dir: "{{ certs_venvs_dir }}/certs" certs_venv_bin: "{{ certs_venv_dir }}/bin" -certs_git_ssh: /tmp/git_ssh.sh certs_git_identity: "{{ certs_app_dir }}/certs-git-identity" -certs_requirements_file: "{{ certs_code_dir }}/requirements.txt" -certs_version: 'master' +certs_requirements_file: "{{ certs_code_dir }}/requirements/base.txt" +CERTS_VERSION: 'master' +certs_version: "{{ CERTS_VERSION }}" certs_gpg_dir: "{{ certs_app_dir }}/gnupg" certs_env_config: # CERTS_DATA is legacy, not used diff --git a/playbooks/roles/certs/tasks/deploy.yml b/playbooks/roles/certs/tasks/deploy.yml index d84184ca2cf..a4a046d4792 100644 --- a/playbooks/roles/certs/tasks/deploy.yml +++ b/playbooks/roles/certs/tasks/deploy.yml @@ -35,13 +35,6 @@ 
mode: "0644" when: not disable_edx_services -- name: Create ssh script for git - template: - src: "{{ certs_git_ssh | basename }}.j2" - dest: "{{ certs_git_ssh }}" - owner: "{{ certs_user }}" - mode: "0750" - # This key is only needed if you are pulling down a private # certificates repo - name: Install read-only ssh key for the certs repo @@ -59,9 +52,8 @@ repo: "{{ CERTS_REPO }}" version: "{{ certs_version }}" accept_hostkey: yes + key_file: "{{ certs_git_identity }}" become_user: "{{ certs_user }}" - environment: - GIT_SSH: "{{ certs_git_ssh }}" register: certs_checkout when: CERTS_GIT_IDENTITY != "none" diff --git a/playbooks/roles/certs/tasks/tag_ec2.yml b/playbooks/roles/certs/tasks/tag_ec2.yml index 77cf0550dee..6a6eef327e6 100644 --- a/playbooks/roles/certs/tasks/tag_ec2.yml +++ b/playbooks/roles/certs/tasks/tag_ec2.yml @@ -1,7 +1,7 @@ --- - name: Get instance information - action: ec2_facts + action: ec2_metadata_facts - name: Tag instance ec2_tag: diff --git a/playbooks/roles/certs/templates/git_ssh.sh.j2 b/playbooks/roles/certs/templates/git_ssh.sh.j2 deleted file mode 100644 index 8ecdc9cb89d..00000000000 --- a/playbooks/roles/certs/templates/git_ssh.sh.j2 +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ certs_git_identity }} "$@" diff --git a/playbooks/roles/codejail/defaults/main.yml b/playbooks/roles/codejail/defaults/main.yml new file mode 100644 index 00000000000..c24a6f4a90d --- /dev/null +++ b/playbooks/roles/codejail/defaults/main.yml @@ -0,0 +1,13 @@ +--- +codejail_debian_packages: + - apparmor-utils +CODEJAIL_PYTHON_VERSIONS: + - python2.7 + - python3.5 + - python3.6 +codejail_python_versions: "{{ CODEJAIL_PYTHON_VERSIONS }}" +codejail_sandbox_user: 'sandbox' +codejail_sandbox_group: 'sandbox' +codejail_sandbox_name_base: 'codejail_sandbox' +codejail_sandbox_env: '/home/{{ codejail_sandbox_user }}/{{ codejail_sandbox_name_base }}' +codejail_sandbox_caller: 'ubuntu' diff --git 
a/playbooks/roles/codejail/tasks/main.yml b/playbooks/roles/codejail/tasks/main.yml new file mode 100644 index 00000000000..c629fad5b0d --- /dev/null +++ b/playbooks/roles/codejail/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: Install codejail specific system packages + apt: + name: '{{ item }}' + state: present + update_cache: yes + with_items: '{{ codejail_debian_packages }}' +- name: Create group for sandbox user + group: + name: '{{ codejail_sandbox_group }}' + state: present + system: yes +- name: Create sandbox user + user: + name: '{{ codejail_sandbox_user }}' + group: '{{ codejail_sandbox_group }}' + state: present +- name: Create sandboxed virtual environments for every Python installation + shell: "virtualenv -p {{ item }} --always-copy --no-site-packages {{ codejail_sandbox_env }}-{{ item }}" + become: true + with_items: "{{ codejail_python_versions }}" +- name: Clone codejail repo + git: + repo: 'https://github.com/edx/codejail.git' + dest: '/tmp/codejail' + version: 'master' +- name: Install codejail sandbox dependencies + pip: + requirements: '/tmp/codejail/requirements/sandbox.txt' + virtualenv: "{{ codejail_sandbox_env }}-{{ item }}" + state: present + become: true + with_items: "{{ codejail_python_versions }}" +- name: Set permissions for sandboxed Python environments + file: + path: '{{ codejail_sandbox_env }}-{{ item }}' + recurse: yes + owner: '{{ codejail_sandbox_user }}' + group: '{{ codejail_sandbox_group }}' + with_items: "{{ codejail_python_versions }}" + become: true +- name: Template sudoers file + template: + src: "sudoers-template" + dest: "/etc/sudoers.d/01-sandbox" +- name: Create AppArmor profiles for each Python installation + template: + src: "apparmor-template" + dest: '/etc/apparmor.d/home.{{ codejail_sandbox_user }}.{{ codejail_sandbox_name_base }}-{{ item }}.bin.python' + with_items: "{{ codejail_python_versions }}" +- name: Parse AppArmor profiles + shell: 'apparmor_parser /etc/apparmor.d/home.{{ codejail_sandbox_user }}.{{ 
codejail_sandbox_name_base }}-{{ item }}.bin.python' + become: true + with_items: "{{ codejail_python_versions }}" +- name: Enforce AppArmor profile + shell: 'aa-enforce /etc/apparmor.d/home.{{ codejail_sandbox_user }}.{{ codejail_sandbox_name_base }}-{{ item }}.bin.python' + become: true + with_items: "{{ codejail_python_versions }}" diff --git a/playbooks/roles/codejail/templates/apparmor-template b/playbooks/roles/codejail/templates/apparmor-template new file mode 100644 index 00000000000..a7f4fb7d695 --- /dev/null +++ b/playbooks/roles/codejail/templates/apparmor-template @@ -0,0 +1,27 @@ +#include + +{{ codejail_sandbox_env }}-{{ item }}/bin/python { + #include + #include + + {{ codejail_sandbox_env }}-{{ item }}/** mr, + /tmp/codejail-*/ rix, + /tmp/codejail-*/** wrix, + + # Whitelist particular shared objects from the system + # python installation + # + /usr/lib/{{ item }}/lib-dynload/_json.so mr, + /usr/lib/{{ item }}/lib-dynload/_ctypes.so mr, + /usr/lib/{{ item }}/lib-dynload/_heapq.so mr, + /usr/lib/{{ item }}/lib-dynload/_io.so mr, + /usr/lib/{{ item }}/lib-dynload/_csv.so mr, + /usr/lib/{{ item }}/lib-dynload/datetime.so mr, + /usr/lib/{{ item }}/lib-dynload/_elementtree.so mr, + /usr/lib/{{ item }}/lib-dynload/pyexpat.so mr, + /usr/lib/{{ item }}/lib-dynload/future_builtins.so mr, + # + # Allow access to selections from /proc + # + /proc/*/mounts r, +} diff --git a/playbooks/roles/codejail/templates/sudoers-template b/playbooks/roles/codejail/templates/sudoers-template new file mode 100644 index 00000000000..c5fd7cc5542 --- /dev/null +++ b/playbooks/roles/codejail/templates/sudoers-template @@ -0,0 +1,11 @@ +{% for python_version in codejail_python_versions %} +{{ codejail_sandbox_caller }} ALL=({{ codejail_sandbox_user }}) SETENV:NOPASSWD:{{ codejail_sandbox_env }}-{{ python_version }}/bin/python +{% endfor %} +{{ codejail_sandbox_caller }} ALL=({{ codejail_sandbox_user }}) SETENV:NOPASSWD:/usr/bin/find +{{ codejail_sandbox_caller }} ALL=(ALL)
NOPASSWD:/usr/bin/pkill + +{% for python_version in codejail_python_versions %} +Defaults!{{ codejail_sandbox_env }}-{{ python_version }}/bin/python !requiretty +{% endfor %} +Defaults!/usr/bin/find !requiretty +Defaults!/usr/bin/pkill !requiretty diff --git a/playbooks/roles/common/tasks/main.yml b/playbooks/roles/common/tasks/main.yml index 2147296758c..83fe27e512e 100644 --- a/playbooks/roles/common/tasks/main.yml +++ b/playbooks/roles/common/tasks/main.yml @@ -10,12 +10,22 @@ # ubuntu - stat: - path: "/usr/sbin/update-ca-certificates" + path: "{{ item }}" + with_items: + - "/usr/local/share/ca-certificates" + - "/usr/sbin/update-ca-certificates" register: update_ca_certificates +- name: Download digicert intermediate Certificate + get_url: + url: "{{ common_digicert_base_url }}/{{ common_digicert_name }}.pem" + dest: "/usr/local/share/ca-certificates/{{ common_digicert_name }}" + validate_certs: no + when: update_ca_certificates is defined and update_ca_certificates.results[0].stat.exists == True + - name: Update CA Certificates shell: /usr/sbin/update-ca-certificates - when: update_ca_certificates is defined and update_ca_certificates.stat.exists == True + when: update_ca_certificates is defined and update_ca_certificates.results[1].stat.exists == True # ec2-linux - stat: @@ -51,6 +61,14 @@ repo: "{{ common_git_ppa }}" when: ansible_distribution in common_debian_variants +- name: Add ppa for watchman package + apt_repository: + repo: "ppa:linuxuprising/apps" + when: > + ansible_distribution in common_debian_variants and + ({{ devstack | default(False) }} or {{ edx_django_service_is_devstack | default(False) }}) + tags: + - "devstack" # Ensure that we can install old software if need be. 
- name: Add edX PPA apt key @@ -59,7 +77,15 @@ keyserver: "{{ COMMON_EDX_PPA_KEY_SERVER }}" state: "present" when: > - ansible_distribution in common_debian_variants + ansible_distribution in common_debian_variants and + ansible_distribution_release != 'bionic' + +- name: Update expired apt keys + shell: apt-key adv --recv-keys --keyserver {{ COMMON_EDX_PPA_KEY_SERVER }} {{ COMMON_EDX_PPA_KEY_ID }} + when: > + ansible_distribution in common_debian_variants and + ansible_distribution_release != 'bionic' + - name: Add custom edX PPA # Ensure that we get the latest version of python 2.7 @@ -67,36 +93,48 @@ apt_repository: repo: "{{ COMMON_EDX_PPA }}" when: > - ansible_distribution in common_debian_variants + ansible_distribution in common_debian_variants and + ansible_distribution_release != 'bionic' - name: Install role-independent useful system packages # do this before log dir setup; rsyslog package guarantees syslog user present apt: - name: "{{ item }}" + name: "{{ common_debian_pkgs }}" install_recommends: yes state: present update_cache: yes - with_items: "{{ common_debian_pkgs }}" when: ansible_distribution in common_debian_variants +- name: Install role-independent packages useful for devstack. 
+ apt: + name: "{{ common_debian_devstack_pkgs }}" + install_recommends: yes + state: present + update_cache: yes + when: > + ansible_distribution in common_debian_variants and + ({{ devstack | default(False) }} or {{ edx_django_service_is_devstack | default(False) }}) + tags: + - "devstack" + + - name: Install role-independent useful system packages from custom PPA apt: - name: "{{ item }}" + name: "{{ old_python_debian_pkgs }}" install_recommends: yes state: present update_cache: yes - with_items: "{{ old_python_debian_pkgs }}" when: > ansible_distribution in common_debian_variants and - ansible_distribution_release in old_python_ppa_releases + ansible_distribution_release in old_python_ppa_releases and + ansible_distribution_release != 'bionic' - name: Install role-independent useful system packages yum: - name: "{{ item }}" + name: "{{ common_redhat_pkgs }}" state: present update_cache: yes - with_items: "{{ common_redhat_pkgs }}" when: ansible_distribution in common_redhat_variants @@ -108,6 +146,10 @@ group: "{{ item.group | default('root') }}" mode: "0755" with_items: "{{ common_directories }}" + tags: + - install + - install:configuration + - common_directories - name: upload sudo config for key forwarding as root copy: @@ -120,10 +162,9 @@ - name: pip install virtualenv pip: - name: "{{ item }}" + name: "{{ common_pip_pkgs }}" state: present extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" - with_items: "{{ common_pip_pkgs }}" when: ansible_distribution in common_debian_variants @@ -145,7 +186,7 @@ shell: hostname -F /etc/hostname when: COMMON_HOSTNAME|length >0 and (etc_hosts.changed or etc_hostname.changed) -- name: Copy the templates to their respestive destination +- name: Copy the templates to their respective destination template: dest: "{{ item.dest }}" src: "{{ item.src }}" diff --git a/playbooks/roles/common_vars/defaults/main.yml b/playbooks/roles/common_vars/defaults/main.yml index bdb108bfe94..982878b611b 100644 --- 
a/playbooks/roles/common_vars/defaults/main.yml +++ b/playbooks/roles/common_vars/defaults/main.yml @@ -12,7 +12,7 @@ COMMON_BASIC_AUTH_EXCEPTIONS: - 172.16.0.0/12 # Settings to use for calls to edxapp manage.py -COMMON_EDXAPP_SETTINGS: 'aws' +COMMON_EDXAPP_SETTINGS: 'production' # Turn on syncing logs on rotation for edx # application and tracking logs, must also @@ -21,6 +21,7 @@ COMMON_OBJECT_STORE_LOG_SYNC: False COMMON_OBJECT_STORE_LOG_SYNC_BUCKET: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}" COMMON_OBJECT_STORE_LOG_SYNC_PREFIX: "logs/tracking/" COMMON_OBJECT_STORE_LOG_SYNC_SCRIPT: "{{ COMMON_BIN_DIR }}/send-logs-to-object-store" +COMMON_OBJECT_STORE_LOG_SYNC_ON_EXIT: "{{ COMMON_BIN_DIR }}/sync-logs-on-exit" COMMON_BASE_DIR: /edx COMMON_DATA_DIR: "{{ COMMON_BASE_DIR}}/var" @@ -53,6 +54,9 @@ COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple' COMMON_NPM_MIRROR_URL: 'https://registry.npmjs.org' COMMON_UBUNTU_APT_KEYSERVER: "http://keyserver.ubuntu.com/pks/lookup?op=get&fingerprint=on&search=" +common_digicert_name: "DigiCertSHA2SecureServerCA.crt" +common_digicert_base_url: "https://dl.cacerts.digicert.com/" + COMMON_EDX_PPA: "deb http://ppa.edx.org {{ ansible_distribution_release }} main" COMMON_EDX_PPA_KEY_SERVER: "keyserver.ubuntu.com" COMMON_EDX_PPA_KEY_ID: "69464050" @@ -65,6 +69,7 @@ COMMON_GIT_PATH: 'edx' # git path prefix # override this var to set a different hostname COMMON_HOSTNAME: "" +COMMON_DEPLOY_HOSTNAME: "" # Set to true to customize DNS search domains COMMON_CUSTOM_DHCLIENT_CONFIG: false @@ -81,6 +86,8 @@ COMMON_SECURITY_UPDATES: no COMMON_MYSQL_READ_ONLY_USER: 'read_only' COMMON_MYSQL_READ_ONLY_PASS: 'password' +COMMON_ANALYTICS_MYSQL_READ_ONLY_USER: 'read_only' +COMMON_ANALYTICS_MYSQL_READ_ONLY_PASS: 'password' COMMON_MYSQL_ADMIN_USER: 'admin' COMMON_MYSQL_ADMIN_PASS: 'password' COMMON_MYSQL_MIGRATE_USER: 'migrate' @@ -98,8 +105,10 @@ COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE: False COMMON_ENABLE_NEWRELIC_APP: False 
COMMON_ENABLE_MINOS: False COMMON_TAG_EC2_INSTANCE: False -common_boto_version: '2.48.0' -common_node_version: '8.9.3' +COMMON_BOTO_VERSION: '2.48.0' +common_boto_version: "{{ COMMON_BOTO_VERSION }}" +COMMON_NODE_VERSION: '12.13.0' +common_node_version: "{{ COMMON_NODE_VERSION }}" common_redhat_pkgs: - ntp - lynx @@ -113,7 +122,7 @@ common_debian_pkgs: - ntp - acl - iotop - - lynx-cur + - lynx - logrotate - rsyslog - git @@ -122,18 +131,22 @@ common_debian_pkgs: - python2.7-dev - libpq-dev +common_debian_devstack_pkgs: + - watchman + # Packages that should be installed from our custom PPA, i.e. COMMON_EDX_PPA old_python_debian_pkgs: - "python2.7=2.7.10-0+{{ ansible_distribution_release }}1" -common_pip_version: '9.0.3' +COMMON_PIP_VERSION: '20.0.2' +common_pip_version: "{{ COMMON_PIP_VERSION }}" common_pip_pkgs: - pip=={{ common_pip_version }} - - setuptools==39.0.1 - - virtualenv==15.2.0 - - virtualenvwrapper==4.8.2 + - setuptools==44.1.0 + - virtualenv==16.7.10 + - boto3 common_web_user: www-data common_web_group: www-data @@ -169,6 +182,11 @@ common_redhat_variants: - Red Hat Enterprise Linux - Amazon +# Gunicorn pre_request function to log request before it is processed further. +common_pre_request: | + def pre_request(worker, req): + worker.log.info("%s %s" % (req.method, req.path)) + # Code used in gunicorn post_fork functions to be sure we aren't sharing cache # connections among forked children. 
common_close_all_caches: | @@ -214,18 +232,53 @@ COMMON_TRACKING_LOG_ROTATION: COMMON_EXTRA_CONFIGURATION_SOURCES_CHECKING: false COMMON_EXTRA_CONFIGURATION_SOURCES: [] -COMMON_OAUTH_BASE_URL: 'http://127.0.0.1:8000' +COMMON_LMS_BASE_URL: 'http://127.0.0.1:8000' +COMMON_OAUTH_BASE_URL: '{{ COMMON_LMS_BASE_URL }}' COMMON_OAUTH_PUBLIC_URL_ROOT: '{{ COMMON_OAUTH_BASE_URL }}/oauth2' COMMON_OAUTH_URL_ROOT: '{{ COMMON_OAUTH_PUBLIC_URL_ROOT }}' COMMON_OAUTH_LOGOUT_URL: '{{ COMMON_OAUTH_BASE_URL }}/logout' -COMMON_OIDC_ISSUER: '{{ COMMON_OAUTH_URL_ROOT }}' - +############ +# Settings related to JSON Web Tokens (JWTs). +# See https://github.com/edx/edx-platform/blob/master/openedx/core/djangoapps/oauth_dispatch/docs/decisions/0003-use-jwt-as-oauth-tokens-remove-openid-connect.rst COMMON_JWT_AUDIENCE: 'SET-ME-PLEASE' -COMMON_JWT_ISSUER: '{{ COMMON_OIDC_ISSUER }}' +COMMON_JWT_ISSUER: '{{ COMMON_OAUTH_URL_ROOT }}' + +# The following should be the string representation of a JSON Web Key Set (JWK set) +# containing active public keys for signing JWTs. +# See https://github.com/edx/edx-platform/blob/master/openedx/core/djangoapps/oauth_dispatch/docs/decisions/0008-use-asymmetric-jwts.rst +COMMON_JWT_PUBLIC_SIGNING_JWK_SET: '' + +COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD: 'edx-jwt-cookie-header-payload' +COMMON_JWT_AUTH_COOKIE_SIGNATURE: 'edx-jwt-cookie-signature' +COMMON_JWT_AUTH_REFRESH_COOKIE: 'edx-jwt-refresh-cookie' + +# To be deprecated, in favor of the above COMMON_JWT_PUBLIC_SIGNING_JWK_SET. COMMON_JWT_SECRET_KEY: 'SET-ME-PLEASE' +############ # Set worker user default CREATE_SERVICE_WORKER_USERS: True COMMON_ENABLE_AWS_ROLE: true + +# Remote config +COMMON_HERMES_ENABLED: false + +COMMON_DECRYPT_CONFIG_ENABLED: false +COMMON_COPY_CONFIG_ENABLED: false + +# Disable logging of config rendering which has secrets +COMMON_CONFIG_NO_LOGGING: True + +# Default sandbox build flag to false +SANDBOX_CONFIG: False + +# Should we create the JWT settings? 
+CONFIGURE_JWTS: false + +# Variable to control setting up the retirement services +COMMON_RETIREMENT_SERVICE_SETUP: false + +# How to log in as "lms" to xqueue. +COMMON_XQUEUE_LMS_PASSWORD: password diff --git a/playbooks/roles/alton/meta/main.yml b/playbooks/roles/conductor/defaults/main.yml similarity index 62% rename from playbooks/roles/alton/meta/main.yml rename to playbooks/roles/conductor/defaults/main.yml index ede10280db0..cc2263be26c 100644 --- a/playbooks/roles/alton/meta/main.yml +++ b/playbooks/roles/conductor/defaults/main.yml @@ -8,19 +8,14 @@ # license: https://github.com/edx/configuration/blob/master/LICENSE.TXT # ## -# Role includes for role alton -# -# Example: +# Defaults for role conductor # -# dependencies: -# - { -# role: my_role -# my_role_var0: "foo" -# my_role_var1: "bar" -# } -dependencies: - - common - - supervisor - - redis +# .env vars + +# nginx vars +NGINX_CONDUCTOR_PROXY_INTERCEPT_ERRORS: true +CONDUCTOR_STATIC_SITES: [] +CONDUCTOR_REDIRECT_ROOT: false +CONDUCTOR_ROOT_REDIRECT_PATH: "" diff --git a/playbooks/roles/conductor/meta/main.yml b/playbooks/roles/conductor/meta/main.yml new file mode 100644 index 00000000000..3d12d718ea7 --- /dev/null +++ b/playbooks/roles/conductor/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - nginx diff --git a/playbooks/roles/conductor/tasks/main.yml b/playbooks/roles/conductor/tasks/main.yml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/playbooks/roles/config-encoders/filter_plugins/config_encoders.py b/playbooks/roles/config-encoders/filter_plugins/config_encoders.py index 767520c3dfa..91a43f8256b 100644 --- a/playbooks/roles/config-encoders/filter_plugins/config_encoders.py +++ b/playbooks/roles/config-encoders/filter_plugins/config_encoders.py @@ -25,6 +25,8 @@ from ansible import errors from copy import copy import re +import six +from six.moves import map def _str_is_bool(data): @@ -167,7 +169,7 @@ def encode_apache( elif block_type == 'options': for o in 
data: - for key, val in sorted(o.iteritems()): + for key, val in sorted(six.iteritems(o)): rv += "%s%s " % (indent * (level-1), key) rv += encode_apache( val, @@ -195,7 +197,7 @@ def encode_apache( else: rv += str(data) - elif isinstance(data, basestring): + elif isinstance(data, six.string_types): # Value is a string if ( quote_all_strings or @@ -242,7 +244,7 @@ def encode_erlang( rv += "\n" - for key, val in sorted(data.iteritems()): + for key, val in sorted(six.iteritems(data)): rv += "%s{%s," % (indent*level, key) if not isinstance(val, dict): @@ -266,7 +268,7 @@ def encode_erlang( rv += str(data).lower() - elif isinstance(data, basestring): + elif isinstance(data, six.string_types): # It's a string atom_len = len(atom_value_indicator) @@ -287,7 +289,7 @@ def encode_erlang( for val in data: if ( - isinstance(val, basestring) or + isinstance(val, six.string_types) or _is_num(val)): rv += "\n%s" % (indent*level) @@ -336,10 +338,10 @@ def encode_haproxy(data, indent=" "): if isinstance(section, dict): # It's a section - rv += "%s\n" % section.keys()[0] + rv += "%s\n" % list(section.keys())[0] # Process all parameters of the section - for param in section.values()[0]: + for param in list(section.values())[0]: rv += "%s%s\n" % (indent, param) else: # It's a comment of a parameter @@ -358,7 +360,7 @@ def encode_ini( rv = "" # First process all standalone properties - for prop, val in sorted(data.iteritems()): + for prop, val in sorted(six.iteritems(data)): if ucase_prop: prop = prop.upper() @@ -375,7 +377,7 @@ def encode_ini( prop, delimiter, quote, _escape(item, quote), quote) # Then process all sections - for section, props in sorted(data.iteritems()): + for section, props in sorted(six.iteritems(data)): if isinstance(props, dict): if rv != "": rv += "\n" @@ -411,7 +413,7 @@ def encode_json( if len(data) > 0: rv += "\n" - items = sorted(data.iteritems()) + items = sorted(six.iteritems(data)) for key, val in items: rv += '%s"%s": ' % (indent * (level+1), key) @@ 
-445,7 +447,7 @@ def encode_json( rv += str(data).lower() - elif isinstance(data, basestring): + elif isinstance(data, six.string_types): # It's a string rv += '"%s"' % _escape(_escape(data), format='control') @@ -495,7 +497,7 @@ def encode_logstash( if prevtype in ('value', 'value_hash', 'array'): rv += "{\n" - items = sorted(data.iteritems()) + items = sorted(six.iteritems(data)) for key, val in items: if key[0] == section_prefix: @@ -511,11 +513,11 @@ def encode_logstash( # Last item of the loop if items[-1] == (key, val): if ( - isinstance(val, basestring) or + isinstance(val, six.string_types) or _is_num(val) or isinstance(val, bool) or ( isinstance(val, dict) and - val.keys()[0][0] != section_prefix)): + list(val.keys())[0][0] != section_prefix)): rv += "\n%s}\n" % (indent * level) else: rv += "%s}\n" % (indent * level) @@ -538,7 +540,7 @@ def encode_logstash( if ( items[-1] != (key, val) and ( - isinstance(val, basestring) or + isinstance(val, six.string_types) or _is_num(val) or isinstance(val, bool))): rv += "\n" @@ -558,7 +560,7 @@ def encode_logstash( rv += str(data).lower() - elif isinstance(data, basestring): + elif isinstance(data, six.string_types): # It's a string rv += '"%s"' % _escape(data) @@ -567,7 +569,7 @@ def encode_logstash( # It's a list for val in data: - if isinstance(val, dict) and val.keys()[0][0] == section_prefix: + if isinstance(val, dict) and list(val.keys())[0][0] == section_prefix: # Value is a block rv += encode_logstash( @@ -614,16 +616,16 @@ def encode_nginx(data, indent=" ", level=0, block_semicolon=False): if item_type in ('section', 'line'): rv += "\n" - rv += "%s%s {\n" % (level*indent, item.keys()[0]) + rv += "%s%s {\n" % (level*indent, list(item.keys())[0]) rv += encode_nginx( - item.values()[0], + list(item.values())[0], level=level+1, block_semicolon=block_semicolon) rv += "%s}%s\n" % (level*indent, ';' if block_semicolon else '') item_type = 'section' - elif isinstance(item, basestring): + elif isinstance(item, 
six.string_types): # Normal line if item_type == 'section': rv += "\n" @@ -654,7 +656,7 @@ def encode_pam( # Remember previous type to make newline between type blocks prev_type = None - for label, rule in sorted(data.iteritems()): + for label, rule in sorted(six.iteritems(data)): if separate_types: # Add extra newline to separate blocks of the same type if prev_type is not None and prev_type != rule['type']: @@ -676,9 +678,7 @@ def encode_pam( if isinstance(rule['control'], list): rv += "[%s]%s" % ( " ".join( - map( - lambda k: "=".join(map(str, k)), - map(lambda x: x.items()[0], rule['control']))), + ["=".join(map(str, k)) for k in [list(x.items())[0] for x in rule['control']]]), separator) else: rv += "%s%s" % (rule['control'], separator) @@ -693,7 +693,7 @@ def encode_pam( rv += ' ' if isinstance(arg, dict): - rv += "=".join(map(str, arg.items()[0])) + rv += "=".join(map(str, list(arg.items())[0])) else: rv += arg @@ -714,9 +714,9 @@ def encode_toml( # It's a dict # First process all standalone strings, numbers, booleans and lists - for key, val in sorted(data.iteritems()): + for key, val in sorted(six.iteritems(data)): if ( - isinstance(val, basestring) or + isinstance(val, six.string_types) or _is_num(val) or isinstance(val, bool) or ( isinstance(val, list) and @@ -737,7 +737,7 @@ def encode_toml( first = False # Then process all data structures - for key, val in sorted(data.iteritems()): + for key, val in sorted(six.iteritems(data)): if ( isinstance(val, dict) or isinstance(val, list) and isinstance(val[0], dict)): @@ -798,7 +798,7 @@ def encode_toml( if prevtype != 'list': rv += "\n" - elif isinstance(data, basestring): + elif isinstance(data, six.string_types): # It's a string rv += "%s%s%s" % ( @@ -858,7 +858,7 @@ def encode_xml( if ( not ( isinstance(item, dict) and - item.keys()[0].startswith(attribute_sign))): + list(item.keys())[0].startswith(attribute_sign))): rv += encode_xml( item, attribute_sign=attribute_sign, @@ -868,7 +868,7 @@ def encode_xml( 
elif isinstance(data, dict): # It's eiher an attribute or an element - key, val = data.items()[0] + key, val = list(data.items())[0] if key.startswith(attribute_sign): # Process attribute @@ -884,7 +884,7 @@ def encode_xml( for item in val: if ( isinstance(item, dict) and - item.keys()[0].startswith(attribute_sign)): + list(item.keys())[0].startswith(attribute_sign)): num_attrs += 1 rv += encode_xml( item, @@ -907,7 +907,7 @@ def encode_xml( for item in val: if ( isinstance(item, dict) and - not item.keys()[0].startswith(attribute_sign)): + not list(item.keys())[0].startswith(attribute_sign)): val_not_text = True break elif isinstance(val, dict): @@ -947,14 +947,14 @@ def encode_yaml( if isinstance(data, dict): # It's a dictionary - if len(data.keys()) == 0: + if len(list(data.keys())) == 0: rv += "{}\n" else: - for i, (key, val) in enumerate(sorted(data.iteritems())): + for i, (key, val) in enumerate(sorted(six.iteritems(data))): # Skip indentation only for the first pair rv += "%s%s:" % ("" if i == 0 and skip_indent else level*indent, key) - if isinstance(val, dict) and len(val.keys()) == 0: + if isinstance(val, dict) and len(list(val.keys())) == 0: rv += " {}\n" else: if ( @@ -1042,12 +1042,11 @@ def template_replace(data, replacement): # Walk through the data structure and try to replace all special strings if isinstance(local_data, list): - local_data = map( - lambda x: template_replace(x, replacement), local_data) + local_data = [template_replace(x, replacement) for x in local_data] elif isinstance(local_data, dict): - for key, val in local_data.iteritems(): + for key, val in six.iteritems(local_data): local_data[key] = template_replace(val, replacement) - elif isinstance(local_data, basestring): + elif isinstance(local_data, six.string_types): # Replace the special string by it's evaluated value p = re.compile(r'\{\[\{\s*(\w+)([^}\s]+|)\s*\}\]\}') local_data = p.sub(__eval_replace, local_data) diff --git a/playbooks/roles/container/tasks/main.yml 
b/playbooks/roles/container/tasks/main.yml deleted file mode 100644 index 8eafe1e4468..00000000000 --- a/playbooks/roles/container/tasks/main.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS -# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Tasks for role container -# -# Overview: -# -# This is a special role for providing common, container specific -# tasks and serving as a common ancestor for specific container -# providers, say, docker and rocket. -# - diff --git a/playbooks/roles/credentials/defaults/main.yml b/playbooks/roles/credentials/defaults/main.yml index cab27bafb65..5dcdf81d7a8 100644 --- a/playbooks/roles/credentials/defaults/main.yml +++ b/playbooks/roles/credentials/defaults/main.yml @@ -23,6 +23,8 @@ credentials_environment: CREDENTIALS_CFG: '{{ COMMON_CFG_DIR }}/{{ credentials_service_name }}.yml' credentials_gunicorn_port: 8150 +CREDENTIALS_NODE_VERSION: '8.9.3' +credentials_node_version: "{{ CREDENTIALS_NODE_VERSION }}" # # OS packages @@ -43,7 +45,7 @@ CREDENTIALS_MYSQL_HOST: 'localhost' # MySQL usernames are limited to 16 characters CREDENTIALS_MYSQL_USER: 'credentials001' CREDENTIALS_MYSQL_PASSWORD: 'password' -CREDENTIALS_MYSQL_OPTIONS: {} +CREDENTIALS_MYSQL_CONN_MAX_AGE: 60 CREDENTIALS_MEMCACHE: [ 'memcache' ] @@ -57,8 +59,10 @@ CREDENTIALS_LANGUAGE_CODE: 'en' CREDENTIALS_LANGUAGE_COOKIE_NAME: 'openedx-language-preference' # Used to automatically configure OAuth2 Client -CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_KEY: 'credentials-key' -CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_SECRET: 'credentials-secret' +CREDENTIALS_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'credentials-sso-key' +CREDENTIALS_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'credentials-sso-secret' +CREDENTIALS_BACKEND_SERVICE_EDX_OAUTH2_KEY: 
'credentials-backend-service-key' +CREDENTIALS_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'credentials-backend-service-secret' CREDENTIALS_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false CREDENTIALS_SERVICE_USER: 'credentials_service_user' @@ -159,12 +163,15 @@ CREDENTIALS_CERTIFICATE_LANGUAGES: 'en': 'English' 'es_419': 'Spanish' +CREDENTIALS_USERNAME_REPLACEMENT_WORKER: "OVERRIDE THIS WITH A VALID USERNAME" + credentials_service_config_overrides: CERTIFICATE_LANGUAGES: '{{ CREDENTIALS_CERTIFICATE_LANGUAGES }}' CREDENTIALS_SERVICE_USER: '{{ CREDENTIALS_SERVICE_USER }}' FILE_STORAGE_BACKEND: '{{ CREDENTIALS_FILE_STORAGE_BACKEND }}' LANGUAGE_COOKIE_NAME: '{{ CREDENTIALS_LANGUAGE_COOKIE_NAME }}' CSRF_COOKIE_SECURE: "{{ CREDENTIALS_CSRF_COOKIE_SECURE }}" + USERNAME_REPLACEMENT_WORKER: "{{ CREDENTIALS_USERNAME_REPLACEMENT_WORKER }}" # See edx_django_service_automated_users for an example of what this should be CREDENTIALS_AUTOMATED_USERS: {} @@ -177,6 +184,16 @@ CREDENTIALS_DISCOVERY_API_URL: !!null CREDENTIALS_CSRF_COOKIE_SECURE: false +CREDENTIALS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + credentials_post_migrate_commands: - command: './manage.py create_or_update_site --site-id=1 --site-domain={{ CREDENTIALS_DOMAIN }} --site-name="Open edX" --platform-name="Open edX" --company-name="Open edX" --lms-url-root={{ CREDENTIALS_LMS_URL_ROOT }} --catalog-api-url={{ CREDENTIALS_DISCOVERY_API_URL }} --tos-url={{ CREDENTIALS_LMS_URL_ROOT }}/tos --privacy-policy-url={{ CREDENTIALS_LMS_URL_ROOT }}/privacy --homepage-url={{ CREDENTIALS_LMS_URL_ROOT }} --certificate-help-url={{ CREDENTIALS_LMS_URL_ROOT }}/faq --records-help-url={{ CREDENTIALS_LMS_URL_ROOT }}/faq --theme-name=openedx' when: '{{ credentials_create_demo_data }}' + + + +# Remote config +CREDENTIALS_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +CREDENTIALS_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +CREDENTIALS_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" diff --git 
a/playbooks/roles/credentials/meta/main.yml b/playbooks/roles/credentials/meta/main.yml index 862ec8fc4be..d8633116867 100644 --- a/playbooks/roles/credentials/meta/main.yml +++ b/playbooks/roles/credentials/meta/main.yml @@ -35,10 +35,19 @@ dependencies: edx_django_service_db_options: '{{ CREDENTIALS_MYSQL_OPTIONS }}' edx_django_service_social_auth_edx_oidc_key: '{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_KEY }}' edx_django_service_social_auth_edx_oidc_secret: '{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_SECRET }}' + edx_django_service_default_db_conn_max_age: '{{ CREDENTIALS_MYSQL_CONN_MAX_AGE }}' + edx_django_service_social_auth_edx_oauth2_key: '{{ CREDENTIALS_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ CREDENTIALS_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ CREDENTIALS_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ CREDENTIALS_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' edx_django_service_social_auth_redirect_is_https: '{{ CREDENTIALS_SOCIAL_AUTH_REDIRECT_IS_HTTPS }}' edx_django_service_extra_apps: '{{ CREDENTIALS_EXTRA_APPS }}' edx_django_service_session_expire_at_browser_close: '{{ CREDENTIALS_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' + edx_django_service_node_version: '{{ credentials_node_version }}' edx_django_service_automated_users: '{{ CREDENTIALS_AUTOMATED_USERS }}' edx_django_service_cors_whitelist: '{{ CREDENTIALS_CORS_ORIGIN_WHITELIST }}' edx_django_service_post_migrate_commands: '{{ credentials_post_migrate_commands }}' edx_django_service_repos: '{{ CREDENTIALS_REPOS }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ CREDENTIALS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_decrypt_config_enabled: '{{ CREDENTIALS_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ CREDENTIALS_COPY_CONFIG_ENABLED }}' diff --git a/playbooks/roles/datadog/defaults/main.yml 
b/playbooks/roles/datadog/defaults/main.yml index 036a1c4f2be..282e3a0ffd6 100644 --- a/playbooks/roles/datadog/defaults/main.yml +++ b/playbooks/roles/datadog/defaults/main.yml @@ -1,7 +1,8 @@ --- DATADOG_API_KEY: "SPECIFY_KEY_HERE" -datadog_agent_version: '1:5.10.1-1' +DATADOG_AGENT_VERSION: '1:5.10.1-1' +datadog_agent_version: "{{ DATADOG_AGENT_VERSION }}" datadog_apt_key: "0x382E94DE" datadog_debian_pkgs: diff --git a/playbooks/roles/datadog/tasks/main.yml b/playbooks/roles/datadog/tasks/main.yml index e2d88f9df5e..98b7161310c 100644 --- a/playbooks/roles/datadog/tasks/main.yml +++ b/playbooks/roles/datadog/tasks/main.yml @@ -15,9 +15,8 @@ # - name: Install debian needed pkgs apt: - name: "{{ item }}" + name: "{{ datadog_debian_pkgs }}" state: present - with_items: "{{ datadog_debian_pkgs }}" tags: - datadog diff --git a/playbooks/roles/demo/defaults/main.yml b/playbooks/roles/demo/defaults/main.yml index f3f3d2499d1..5a3f68fe2f5 100644 --- a/playbooks/roles/demo/defaults/main.yml +++ b/playbooks/roles/demo/defaults/main.yml @@ -17,7 +17,8 @@ demo_app_dir: "{{ COMMON_APP_DIR }}/demo" demo_code_dir: "{{ demo_app_dir }}/edx-demo-course" demo_repo: "https://{{ COMMON_GIT_MIRROR }}/edx/edx-demo-course.git" demo_course_id: 'course-v1:edX+DemoX+Demo_Course' -demo_version: "master" +DEMO_VERSION: "master" +demo_version: "{{ DEMO_VERSION }}" demo_hashed_password: 'pbkdf2_sha256$20000$TjE34FJjc3vv$0B7GUmH8RwrOc/BvMoxjb5j8EgnWTt3sxorDANeF7Qw=' # edx demo_test_users: - email: 'honor@example.com' diff --git a/playbooks/roles/demo/tasks/deploy.yml b/playbooks/roles/demo/tasks/deploy.yml index a986c6103d9..13ad86a5aa7 100644 --- a/playbooks/roles/demo/tasks/deploy.yml +++ b/playbooks/roles/demo/tasks/deploy.yml @@ -58,5 +58,4 @@ shell: ". 
{{ demo_edxapp_env }} && {{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings={{ demo_edxapp_settings }} seed_permissions_roles {{ demo_course_id }}" args: chdir: "{{ demo_edxapp_code_dir }}" - with_items: "{{ demo_test_users }}" when: demo_checkout.changed diff --git a/playbooks/roles/designer/defaults/main.yml b/playbooks/roles/designer/defaults/main.yml new file mode 100644 index 00000000000..cc2fc8f842f --- /dev/null +++ b/playbooks/roles/designer/defaults/main.yml @@ -0,0 +1,129 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role designer +# + + +# +# vars are namespace with the module name. +# +designer_service_name: 'designer' + +designer_environment: + DESIGNER_CFG: '{{ COMMON_CFG_DIR }}/{{ designer_service_name }}.yml' + +designer_gunicorn_port: 8808 + +designer_debian_pkgs: [] + +DESIGNER_NGINX_PORT: '1{{ designer_gunicorn_port }}' +DESIGNER_SSL_NGINX_PORT: '4{{ designer_gunicorn_port }}' + +DESIGNER_DEFAULT_DB_NAME: 'designer' +DESIGNER_MYSQL_HOST: 'localhost' +# MySQL usernames are limited to 16 characters +DESIGNER_MYSQL_USER: 'designer001' +DESIGNER_MYSQL_PASSWORD: 'password' +DESIGNER_MYSQL_CONN_MAX_AGE: 60 + +DESIGNER_MEMCACHE: [ 'memcache' ] + +DESIGNER_DJANGO_SETTINGS_MODULE: 'designer.settings.production' +DESIGNER_DOMAIN: 'localhost' +DESIGNER_URL_ROOT: 'http://{{ DESIGNER_DOMAIN }}:{{ DESIGNER_NGINX_PORT }}' +DESIGNER_LOGOUT_URL: '{{ DESIGNER_URL_ROOT }}/logout/' + +DESIGNER_LANGUAGE_CODE: 'en' +DESIGNER_LANGUAGE_COOKIE_NAME: 'openedx-language-preference' + +DESIGNER_SERVICE_USER: 'designer_service_user' + +DESIGNER_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ designer_service_name }}' +DESIGNER_MEDIA_ROOT: '{{ DESIGNER_DATA_DIR }}/media' +DESIGNER_MEDIA_URL: 
'/media/' + +DESIGNER_MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' + MEDIA_ROOT: '{{ DESIGNER_MEDIA_ROOT }}' + MEDIA_URL: '{{ DESIGNER_MEDIA_URL }}' + +# TODO: Let edx_django_service manage DESIGNER_STATIC_ROOT in phase 2. +DESIGNER_STATIC_ROOT: '{{ DESIGNER_DATA_DIR }}/staticfiles' +DESIGNER_STATIC_URL: '/static/' + +DESIGNER_STATICFILES_STORAGE: 'django.contrib.staticfiles.storage.StaticFilesStorage' + +DESIGNER_CORS_ORIGIN_ALLOW_ALL: false +DESIGNER_CORS_ORIGIN_WHITELIST_DEFAULT: + - '{{ DESIGNER_DOMAIN }}' + +DESIGNER_CORS_ORIGIN_WHITELIST_EXTRA: [] +DESIGNER_CORS_ORIGIN_WHITELIST: '{{ DESIGNER_CORS_ORIGIN_WHITELIST_DEFAULT + DESIGNER_CORS_ORIGIN_WHITELIST_EXTRA }}' + +DESIGNER_VERSION: 'master' + +DESIGNER_GUNICORN_EXTRA: '' + +DESIGNER_EXTRA_APPS: [] + +DESIGNER_SESSION_EXPIRE_AT_BROWSER_CLOSE: false + +DESIGNER_CERTIFICATE_LANGUAGES: + 'en': 'English' + 'es_419': 'Spanish' + +designer_service_config_overrides: + CERTIFICATE_LANGUAGES: '{{ DESIGNER_CERTIFICATE_LANGUAGES }}' + DESIGNER_SERVICE_USER: '{{ DESIGNER_SERVICE_USER }}' + LANGUAGE_COOKIE_NAME: '{{ DESIGNER_LANGUAGE_COOKIE_NAME }}' + CSRF_COOKIE_SECURE: "{{ DESIGNER_CSRF_COOKIE_SECURE }}" + +# See edx_django_service_automated_users for an example of what this should be +DESIGNER_AUTOMATED_USERS: {} + +# NOTE: These variables are only needed to create the demo site (e.g. 
for sandboxes) +DESIGNER_LMS_URL_ROOT: !!null +DESIGNER_DISCOVERY_API_URL: !!null + +DESIGNER_CSRF_COOKIE_SECURE: false + +DESIGNER_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +designer_post_migrate_commands: [] + +DESIGNER_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'designer-sso-key' +DESIGNER_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'designer-sso-secret' +DESIGNER_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'designer-backend-service-key' +DESIGNER_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'designer-backend-service-secret' +DESIGNER_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false + +DESIGNER_GIT_IDENTITY: !!null + +designer_service_name: "designer" +designer_home: "{{ COMMON_APP_DIR }}/{{ designer_service_name }}" +designer_code_dir: "{{ designer_home }}/{{ designer_service_name }}" + +DESIGNER_REPOS: + - PROTOCOL: '{{ COMMON_GIT_PROTOCOL }}' + DOMAIN: '{{ COMMON_GIT_MIRROR }}' + PATH: '{{ COMMON_GIT_PATH }}' + REPO: 'portal-designer.git' + VERSION: '{{ DESIGNER_VERSION }}' + DESTINATION: "{{ designer_code_dir }}" + SSH_KEY: '{{ DESIGNER_GIT_IDENTITY }}' + +DESIGNER_SECRET_KEY: 'SET-ME-PLEASE' + +# Remote config +DESIGNER_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +DESIGNER_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +DESIGNER_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" diff --git a/playbooks/roles/designer/meta/main.yml b/playbooks/roles/designer/meta/main.yml new file mode 100644 index 00000000000..b52dd7273f9 --- /dev/null +++ b/playbooks/roles/designer/meta/main.yml @@ -0,0 +1,48 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role ansible-role-django-ida + +dependencies: + - role: edx_django_service + edx_django_service_version: '{{ DESIGNER_VERSION }}' + edx_django_service_name: '{{ 
designer_service_name }}' + edx_django_service_config_overrides: '{{ designer_service_config_overrides }}' + edx_django_service_debian_pkgs_extra: '{{ designer_debian_pkgs }}' + edx_django_service_gunicorn_port: '{{ designer_gunicorn_port }}' + edx_django_service_repos: '{{ DESIGNER_REPOS }}' + edx_django_service_django_settings_module: '{{ DESIGNER_DJANGO_SETTINGS_MODULE }}' + edx_django_service_environment_extra: '{{ designer_environment }}' + edx_django_service_gunicorn_extra: '{{ DESIGNER_GUNICORN_EXTRA }}' + edx_django_service_nginx_port: '{{ DESIGNER_NGINX_PORT }}' + edx_django_service_ssl_nginx_port: '{{ DESIGNER_SSL_NGINX_PORT }}' + edx_django_service_language_code: '{{ DESIGNER_LANGUAGE_CODE }}' + edx_django_service_secret_key: '{{ DESIGNER_SECRET_KEY }}' + edx_django_service_media_storage_backend: '{{ DESIGNER_MEDIA_STORAGE_BACKEND }}' + edx_django_service_staticfiles_storage: '{{ DESIGNER_STATICFILES_STORAGE }}' + edx_django_service_memcache: '{{ DESIGNER_MEMCACHE }}' + edx_django_service_default_db_host: '{{ DESIGNER_MYSQL_HOST }}' + edx_django_service_default_db_name: '{{ DESIGNER_DEFAULT_DB_NAME }}' + edx_django_service_default_db_atomic_requests: false + edx_django_service_db_user: '{{ DESIGNER_MYSQL_USER }}' + edx_django_service_db_password: '{{ DESIGNER_MYSQL_PASSWORD }}' + edx_django_service_default_db_conn_max_age: '{{ DESIGNER_MYSQL_CONN_MAX_AGE }}' + edx_django_service_extra_apps: '{{ DESIGNER_EXTRA_APPS }}' + edx_django_service_session_expire_at_browser_close: '{{ DESIGNER_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' + edx_django_service_social_auth_edx_oauth2_key: '{{ DESIGNER_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ DESIGNER_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ DESIGNER_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ DESIGNER_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + edx_django_service_automated_users: '{{ 
DESIGNER_AUTOMATED_USERS }}' + edx_django_service_cors_whitelist: '{{ DESIGNER_CORS_ORIGIN_WHITELIST }}' + edx_django_service_post_migrate_commands: '{{ designer_post_migrate_commands }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ DESIGNER_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_decrypt_config_enabled: '{{ DESIGNER_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ DESIGNER_COPY_CONFIG_ENABLED }}' diff --git a/playbooks/roles/docker/tasks/main.yml b/playbooks/roles/designer/tasks/main.yml similarity index 67% rename from playbooks/roles/docker/tasks/main.yml rename to playbooks/roles/designer/tasks/main.yml index 3f3db712965..89fe4319c07 100644 --- a/playbooks/roles/docker/tasks/main.yml +++ b/playbooks/roles/designer/tasks/main.yml @@ -9,13 +9,14 @@ # # # -# Tasks for role docker -# -# Overview: +# Tasks for role designer +# +# Overview: This role's tasks come from edx_django_service. # -# This is a special role for providing common, docker specific tasks. 
# # Dependencies: # -# Inherits from the container role via meta/main.yml -# +# +# Example play: +# +# diff --git a/playbooks/roles/devstack_sqlite_fix/defaults/main.yml b/playbooks/roles/devstack_sqlite_fix/defaults/main.yml deleted file mode 100644 index 565e0b59ff1..00000000000 --- a/playbooks/roles/devstack_sqlite_fix/defaults/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -SQLITE_FIX_TMP_DIR: "/var/tmp/sqlite_fix" - -PYSQLITE_URL: "https://codeload.github.com/ghaering/pysqlite/tar.gz/2.8.3" -PYSQLITE_CREATED_PATH: "pysqlite-2.8.3" -PYSQLITE_TMP_PATH: "{{ SQLITE_FIX_TMP_DIR }}/{{ PYSQLITE_CREATED_PATH }}" - -SQLITE_AUTOCONF_URL: "https://www.sqlite.org/2016/sqlite-autoconf-3140100.tar.gz" -SQLITE_AUTOCONF_CREATED_PATH: "sqlite-autoconf-3140100" -SQLITE_TMP_PATH: "{{ SQLITE_FIX_TMP_DIR }}/{{ SQLITE_AUTOCONF_CREATED_PATH }}" diff --git a/playbooks/roles/devstack_sqlite_fix/tasks/main.yml b/playbooks/roles/devstack_sqlite_fix/tasks/main.yml deleted file mode 100644 index 2e885e1f141..00000000000 --- a/playbooks/roles/devstack_sqlite_fix/tasks/main.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -- name: Creates directory - file: - path: "{{ SQLITE_FIX_TMP_DIR }}" - state: directory - mode: 0775 - when: devstack is defined and devstack - tags: - - devstack - - devstack:install - - -# Tasks to download and upgrade pysqlite to prevent segfaults when testing in devstack -- name: Download and unzip sqlite autoconf update - unarchive: - src: "{{ SQLITE_AUTOCONF_URL }}" - dest: "{{ SQLITE_FIX_TMP_DIR }}" - remote_src: yes - when: devstack is defined and devstack - tags: - - devstack - - devstack:install - -- name: Download and unzip pysqlite update - unarchive: - src: "{{ PYSQLITE_URL }}" - dest: "{{ SQLITE_FIX_TMP_DIR }}" - remote_src: yes - when: devstack is defined and devstack - tags: - - devstack - - devstack:install - -# Copy module doesn't support recursive dir copies for remote_src: yes -- name: Copy pysqlite autoconf into pyslite update dir - command: "cp -av . 
{{ PYSQLITE_TMP_PATH }}/" - args: - chdir: "{{ SQLITE_TMP_PATH }}" - when: devstack is defined and devstack - tags: - - devstack - - devstack:install - -- name: Build and install pysqlite update - command: "python setup.py build_static install" - args: - chdir: "{{ PYSQLITE_TMP_PATH }}" - when: devstack is defined and devstack - tags: - - devstack - - devstack:install - -- name: Clean up pysqlite install artifacts - file: - state: absent - path: "{{ SQLITE_FIX_TMP_DIR }}/" - when: devstack is defined and devstack - tags: - - devstack - - devstack:install diff --git a/playbooks/roles/discovery/defaults/main.yml b/playbooks/roles/discovery/defaults/main.yml index 2a68305cb8f..a2a2e6c7ff9 100644 --- a/playbooks/roles/discovery/defaults/main.yml +++ b/playbooks/roles/discovery/defaults/main.yml @@ -19,6 +19,14 @@ DISCOVERY_GIT_IDENTITY: !!null discovery_service_name: "discovery" discovery_gunicorn_port: 8381 +DISCOVERY_DEFAULT_DB_NAME: 'discovery' +DISCOVERY_MYSQL: 'localhost' +# MySQL usernames are limited to 16 characters +DISCOVERY_MYSQL_USER: 'discov001' +DISCOVERY_MYSQL_PASSWORD: 'password' +DISCOVERY_MYSQL_REPLICA_HOST: 'localhost' +DISCOVERY_MYSQL_CONN_MAX_AGE: 60 + discovery_environment: DISCOVERY_CFG: "{{ COMMON_CFG_DIR }}/{{ discovery_service_name }}.yml" @@ -26,6 +34,9 @@ discovery_user: "{{ discovery_service_name }}" discovery_home: "{{ COMMON_APP_DIR }}/{{ discovery_service_name }}" discovery_code_dir: "{{ discovery_home }}/{{ discovery_service_name }}" +DISCOVERY_NODE_VERSION: '12.11.1' +discovery_node_version: "{{ DISCOVERY_NODE_VERSION }}" + # # OS packages # @@ -39,12 +50,6 @@ discovery_debian_pkgs: DISCOVERY_NGINX_PORT: "1{{ discovery_gunicorn_port }}" DISCOVERY_SSL_NGINX_PORT: "4{{ discovery_gunicorn_port }}" -DISCOVERY_DEFAULT_DB_NAME: 'discovery' -DISCOVERY_MYSQL: 'localhost' -# MySQL usernames are limited to 16 characters -DISCOVERY_MYSQL_USER: 'discov001' -DISCOVERY_MYSQL_PASSWORD: 'password' - # Using SSL? 
See https://www.elastic.co/guide/en/shield/current/ssl-tls.html. # Using AWS? Use the AWS-provided host (e.g. https://search-test-abc123.us-east-1.es.amazonaws.com/). # Assuming the port corresponds that of the protocol (e.g. http=80, https=443), there is no need to specify a port. @@ -77,9 +82,10 @@ DISCOVERY_DEFAULT_PARTNER_ID: 1 DISCOVERY_SESSION_EXPIRE_AT_BROWSER_CLOSE: false # Used to automatically configure OAuth2 Client -DISCOVERY_SOCIAL_AUTH_EDX_OIDC_KEY : 'discovery-key' -DISCOVERY_SOCIAL_AUTH_EDX_OIDC_SECRET : 'discovery-secret' -DISCOVERY_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false +DISCOVERY_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'discovery-sso-key' +DISCOVERY_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'discovery-sso-secret' +DISCOVERY_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'discovery-backend-service-key' +DISCOVERY_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'discovery-backend-service-secret' DISCOVERY_PLATFORM_NAME: 'Your Platform Name Here' @@ -109,14 +115,19 @@ DISCOVERY_EMAIL_USE_TLS: False DISCOVERY_EMAIL_HOST_USER: '' DISCOVERY_EMAIL_HOST_PASSWORD: '' +DISCOVERY_ENABLE_PUBLISHER: false DISCOVERY_PUBLISHER_FROM_EMAIL: !!null DISCOVERY_OPENEXCHANGERATES_API_KEY: '' DISCOVERY_GUNICORN_EXTRA: '' +DISCOVERY_GUNICORN_WORKERS: 2 + DISCOVERY_EXTRA_APPS: [] +DISCOVERY_USERNAME_REPLACEMENT_WORKER: "OVERRIDE THIS WITH A VALID USERNAME" + DISCOVERY_REPOS: - PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}" DOMAIN: "{{ COMMON_GIT_MIRROR }}" @@ -147,6 +158,7 @@ discovery_service_config_overrides: EMAIL_HOST_USER: '{{ DISCOVERY_EMAIL_HOST_USER }}' EMAIL_HOST_PASSWORD: '{{ DISCOVERY_EMAIL_HOST_PASSWORD }}' + ENABLE_PUBLISHER: '{{ DISCOVERY_ENABLE_PUBLISHER }}' PUBLISHER_FROM_EMAIL: '{{ DISCOVERY_PUBLISHER_FROM_EMAIL }}' OPENEXCHANGERATES_API_KEY: '{{ DISCOVERY_OPENEXCHANGERATES_API_KEY }}' @@ -155,8 +167,20 @@ discovery_service_config_overrides: PARLER_DEFAULT_LANGUAGE_CODE: '{{DISCOVERY_PARLER_DEFAULT_LANGUAGE_CODE}}' PARLER_LANGUAGES : '{{DISCOVERY_PARLER_LANGUAGES}}' CSRF_COOKIE_SECURE: "{{ 
DISCOVERY_CSRF_COOKIE_SECURE }}" + CORS_ORIGIN_WHITELIST: "{{ DISCOVERY_CORS_ORIGIN_WHITELIST }}" + + USERNAME_REPLACEMENT_WORKER: "{{ DISCOVERY_USERNAME_REPLACEMENT_WORKER }}" # See edx_django_service_automated_users for an example of what this should be DISCOVERY_AUTOMATED_USERS: {} DISCOVERY_CSRF_COOKIE_SECURE: false +DISCOVERY_CORS_ORIGIN_WHITELIST: [] + +DISCOVERY_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +# Remote config +DISCOVERY_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +DISCOVERY_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +DISCOVERY_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" diff --git a/playbooks/roles/discovery/meta/main.yml b/playbooks/roles/discovery/meta/main.yml index a5c1fab3fe0..a137de1620a 100644 --- a/playbooks/roles/discovery/meta/main.yml +++ b/playbooks/roles/discovery/meta/main.yml @@ -30,6 +30,7 @@ dependencies: edx_django_service_django_settings_module: '{{ DISCOVERY_DJANGO_SETTINGS_MODULE }}' edx_django_service_environment_extra: '{{ discovery_environment }}' edx_django_service_gunicorn_extra: '{{ DISCOVERY_GUNICORN_EXTRA }}' + edx_django_service_gunicorn_workers: '{{ DISCOVERY_GUNICORN_WORKERS }}' edx_django_service_wsgi_name: 'course_discovery' edx_django_service_nginx_port: '{{ DISCOVERY_NGINX_PORT }}' edx_django_service_ssl_nginx_port: '{{ DISCOVERY_SSL_NGINX_PORT }}' @@ -38,14 +39,41 @@ dependencies: edx_django_service_staticfiles_storage: '{{ DISCOVERY_STATICFILES_STORAGE }}' edx_django_service_media_storage_backend: '{{ DISCOVERY_MEDIA_STORAGE_BACKEND }}' edx_django_service_memcache: '{{ DISCOVERY_MEMCACHE }}' - edx_django_service_default_db_host: '{{ DISCOVERY_MYSQL }}' - edx_django_service_default_db_name: '{{ DISCOVERY_DEFAULT_DB_NAME }}' - edx_django_service_default_db_atomic_requests: false - edx_django_service_db_user: '{{ DISCOVERY_MYSQL_USER }}' - edx_django_service_db_password: '{{ DISCOVERY_MYSQL_PASSWORD }}' - edx_django_service_social_auth_edx_oidc_key: '{{ 
DISCOVERY_SOCIAL_AUTH_EDX_OIDC_KEY }}' - edx_django_service_social_auth_edx_oidc_secret: '{{ DISCOVERY_SOCIAL_AUTH_EDX_OIDC_SECRET }}' - edx_django_service_social_auth_redirect_is_https: '{{ DISCOVERY_SOCIAL_AUTH_REDIRECT_IS_HTTPS }}' + edx_django_service_social_auth_edx_oauth2_key: '{{ DISCOVERY_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ DISCOVERY_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ DISCOVERY_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ DISCOVERY_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' edx_django_service_extra_apps: '{{ DISCOVERY_EXTRA_APPS }}' edx_django_service_session_expire_at_browser_close: '{{ DISCOVERY_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' + edx_django_service_node_version: '{{ discovery_node_version }}' edx_django_service_automated_users: '{{ DISCOVERY_AUTOMATED_USERS }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ DISCOVERY_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_decrypt_config_enabled: '{{ DISCOVERY_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ DISCOVERY_COPY_CONFIG_ENABLED }}' + edx_django_service_max_webserver_upload: 10 + + edx_django_service_databases: + default: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ DISCOVERY_DEFAULT_DB_NAME }}' + USER: '{{ DISCOVERY_MYSQL_USER }}' + PASSWORD: '{{ DISCOVERY_MYSQL_PASSWORD }}' + HOST: '{{ DISCOVERY_MYSQL }}' + PORT: 3306 + ATOMIC_REQUESTS: 'false' + CONN_MAX_AGE: '{{ DISCOVERY_MYSQL_CONN_MAX_AGE }}' + OPTIONS: + connect_timeout: 10 + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" + read_replica: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ DISCOVERY_DEFAULT_DB_NAME }}' + USER: '{{ DISCOVERY_MYSQL_USER }}' + PASSWORD: '{{ DISCOVERY_MYSQL_PASSWORD }}' + HOST: '{{ DISCOVERY_MYSQL_REPLICA_HOST }}' + PORT: 3306 + ATOMIC_REQUESTS: 'false' + CONN_MAX_AGE: '{{ DISCOVERY_MYSQL_CONN_MAX_AGE }}' + 
OPTIONS: + connect_timeout: 10 + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" diff --git a/playbooks/roles/docker-tools/meta/main.yml b/playbooks/roles/docker-tools/meta/main.yml new file mode 100644 index 00000000000..e53b9cc9399 --- /dev/null +++ b/playbooks/roles/docker-tools/meta/main.yml @@ -0,0 +1,4 @@ +--- + +dependencies: + - common diff --git a/playbooks/roles/docker-tools/tasks/main.yml b/playbooks/roles/docker-tools/tasks/main.yml index 6bd5ed7ea10..280a587762b 100644 --- a/playbooks/roles/docker-tools/tasks/main.yml +++ b/playbooks/roles/docker-tools/tasks/main.yml @@ -50,11 +50,20 @@ - install - install:system-requirements +- name: Are we in a Docker container + shell: echo $(egrep -q 'docker' /proc/self/cgroup && echo 'yes' || echo 'no') + ignore_errors: yes + register: docker_container + tags: + - install + - install:base + - name: start docker service service: name: docker enabled: yes state: started + when: docker_container.stdout != 'yes' tags: - install - install:configuration diff --git a/playbooks/roles/ecommerce/defaults/main.yml b/playbooks/roles/ecommerce/defaults/main.yml index 19096e816cd..0c6f3549fc6 100644 --- a/playbooks/roles/ecommerce/defaults/main.yml +++ b/playbooks/roles/ecommerce/defaults/main.yml @@ -13,6 +13,15 @@ ECOMMERCE_GIT_IDENTITY: !!null +ECOMMERCE_REPOS: + - PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}" + DOMAIN: "{{ COMMON_GIT_MIRROR }}" + PATH: "{{ COMMON_GIT_PATH }}" + REPO: "ecommerce.git" + VERSION: "{{ ECOMMERCE_VERSION }}" + DESTINATION: "{{ ecommerce_code_dir }}" + SSH_KEY: "{{ ECOMMERCE_GIT_IDENTITY }}" + # depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC # and a key being provided via NEWRELIC_LICENSE_KEY ECOMMERCE_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ ecommerce_service_name }}" @@ -27,6 +36,7 @@ ECOMMERCE_DATABASE_NAME: ecommerce ECOMMERCE_DATABASE_USER: ecomm001 ECOMMERCE_DATABASE_PASSWORD: password ECOMMERCE_DATABASE_HOST: localhost 
+ECOMMERCE_DATABASE_CONN_MAX_AGE: 60 ECOMMERCE_VERSION: "master" ECOMMERCE_DJANGO_SETTINGS_MODULE: "ecommerce.settings.production" @@ -43,18 +53,31 @@ ECOMMERCE_JWT_ALGORITHM: 'HS256' ECOMMERCE_JWT_VERIFY_EXPIRATION: true ECOMMERCE_JWT_DECODE_HANDLER: 'ecommerce.extensions.api.handlers.jwt_decode_handler' ECOMMERCE_JWT_ISSUERS: - - '{{ COMMON_JWT_ISSUER }}' - - 'ecommerce_worker' # Must match the value of JWT_ISSUER configured for the ecommerce worker. + - ISSUER: "{{ COMMON_JWT_ISSUER }}" + AUDIENCE: "{{ COMMON_JWT_AUDIENCE }}" + SECRET_KEY: "{{ COMMON_JWT_SECRET_KEY }}" + - ISSUER: 'ecommerce_worker' # Must match the value of JWT_ISSUER configured for the ecommerce worker. + AUDIENCE: "{{ COMMON_JWT_AUDIENCE }}" + SECRET_KEY: "{{ COMMON_JWT_SECRET_KEY }}" + ECOMMERCE_JWT_LEEWAY: 1 -# NOTE: We have an array of keys to allow for support of multiple when, for example, -# we change keys. This will ensure we continue to operate with JWTs issued signed with the old key -# while migrating to the new key. -ECOMMERCE_JWT_SECRET_KEYS: - - '{{ COMMON_JWT_SECRET_KEY }}' + +ECOMMERCE_ENROLLMENT_FULFILLMENT_TIMEOUT: 7 +ECOMMERCE_LOGGING_ROOT_OVERRIDES: {} +ECOMMERCE_LOGGING_SUBSECTION_OVERRIDES: {} + +# Needed to link to the payment micro-frontend. 
+ECOMMERCE_PAYMENT_MICROFRONTEND_URL: !!null + +# Sailthru +ECOMMERCE_SAILTHRU_KEY: 'sailthru key here' +ECOMMERCE_SAILTHRU_SECRET: 'sailthru secret here' # Used to automatically configure OAuth2 Client -ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_KEY: 'ecommerce-key' -ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_SECRET: 'ecommerce-secret' +ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'ecommerce-sso-key' +ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'ecommerce-sso-secret' +ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'ecommerce-backend-service-key' +ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'ecommerce-backend-service-secret' ECOMMERCE_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false # Settings for affiliate cookie tracking @@ -128,9 +151,21 @@ ECOMMERCE_PAYMENT_PROCESSOR_CONFIG: client_id: '{{ ECOMMERCE_PAYPAL_CLIENT_ID }}' client_secret: '{{ ECOMMERCE_PAYPAL_CLIENT_SECRET }}' receipt_url: '{{ ECOMMERCE_PAYPAL_RECEIPT_URL }}' - cancel_url: '{{ ECOMMERCE_PAYPAL_CANCEL_URL }}' + cancel_checkout_path: '{{ ECOMMERCE_PAYPAL_CANCEL_URL }}' error_url: '{{ ECOMMERCE_PAYPAL_ERROR_URL }}' +# JWT payload user attribute mapping +ECOMMERCE_EDX_DRF_EXTENSIONS: + JWT_PAYLOAD_USER_ATTRIBUTE_MAPPING: + administrator: 'is_staff' + email: 'email' + full_name: 'full_name' + tracking_context: 'tracking_context' + user_id: 'lms_user_id' + JWT_PAYLOAD_MERGEABLE_USER_ATTRIBUTES: + - 'tracking_context' + OAUTH2_USER_INFO_URL: '{{ edx_django_service_oauth2_url_root }}/user_info' + # Theming ECOMMERCE_PLATFORM_NAME: 'Your Platform Name Here' ECOMMERCE_THEME_SCSS: 'sass/themes/default.scss' @@ -153,6 +188,12 @@ ECOMMERCE_BROKER_URL: '{{ ECOMMERCE_BROKER_TRANSPORT }}://{{ ECOMMERCE_BROKER_US ECOMMERCE_DISCOVERY_SERVICE_URL: 'http://localhost:8008' ECOMMERCE_ENTERPRISE_URL: '{{ ECOMMERCE_LMS_URL_ROOT }}' +ECOMMERCE_CORS_ORIGIN_WHITELIST: [] +ECOMMERCE_CORS_URLS_REGEX: '' +ECOMMERCE_CORS_ALLOW_CREDENTIALS: false + +ECOMMERCE_USERNAME_REPLACEMENT_WORKER: "OVERRIDE THIS WITH A VALID USERNAME" + ecommerce_service_config_overrides: 
LANGUAGE_COOKIE_NAME: '{{ ECOMMERCE_LANGUAGE_COOKIE_NAME }}' EDX_API_KEY: '{{ ECOMMERCE_EDX_API_KEY }}' @@ -169,7 +210,12 @@ ecommerce_service_config_overrides: JWT_LEEWAY: '{{ ECOMMERCE_JWT_LEEWAY }}' JWT_DECODE_HANDLER: '{{ ECOMMERCE_JWT_DECODE_HANDLER }}' JWT_ISSUERS: '{{ ECOMMERCE_JWT_ISSUERS }}' - JWT_SECRET_KEYS: '{{ ECOMMERCE_JWT_SECRET_KEYS }}' + JWT_PUBLIC_SIGNING_JWK_SET: '{{ COMMON_JWT_PUBLIC_SIGNING_JWK_SET }}' + JWT_AUTH_COOKIE_HEADER_PAYLOAD: '{{ COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}' + JWT_AUTH_COOKIE_SIGNATURE: '{{ COMMON_JWT_AUTH_COOKIE_SIGNATURE }}' + JWT_AUTH_REFRESH_COOKIE: '{{ COMMON_JWT_AUTH_REFRESH_COOKIE }}' + + EDX_DRF_EXTENSIONS: '{{ ECOMMERCE_EDX_DRF_EXTENSIONS }}' AFFILIATE_COOKIE_KEY: '{{ ECOMMERCE_AFFILIATE_COOKIE_NAME }}' @@ -180,6 +226,9 @@ ecommerce_service_config_overrides: BROKER_URL: '{{ ECOMMERCE_BROKER_URL }}' + SAILTHRU_KEY: '{{ ECOMMERCE_SAILTHRU_KEY }}' + SAILTHRU_SECRET: '{{ ECOMMERCE_SAILTHRU_SECRET }}' + # Theming config COMPREHENSIVE_THEME_DIRS: "{{ ECOMMERCE_COMPREHENSIVE_THEME_DIRS }}" ENABLE_COMPREHENSIVE_THEMING: "{{ ECOMMERCE_ENABLE_COMPREHENSIVE_THEMING }}" @@ -187,13 +236,36 @@ ecommerce_service_config_overrides: CSRF_COOKIE_SECURE: "{{ ECOMMERCE_CSRF_COOKIE_SECURE }}" SESSION_COOKIE_SECURE: '{{ ECOMMERCE_SESSION_COOKIE_SECURE}}' -ECOMMERCE_GUNICORN_WORKER_CLASS: "gevent" + CORS_ORIGIN_WHITELIST: "{{ ECOMMERCE_CORS_ORIGIN_WHITELIST }}" + CORS_URLS_REGEX: "{{ ECOMMERCE_CORS_URLS_REGEX }}" + CORS_ALLOW_CREDENTIALS: "{{ ECOMMERCE_CORS_ALLOW_CREDENTIALS }}" + + USERNAME_REPLACEMENT_WORKER: "{{ ECOMMERCE_USERNAME_REPLACEMENT_WORKER }}" + + PAYMENT_MICROFRONTEND_URL: "{{ ECOMMERCE_PAYMENT_MICROFRONTEND_URL}}" + + ENROLLMENT_FULFILLMENT_TIMEOUT: "{{ ECOMMERCE_ENROLLMENT_FULFILLMENT_TIMEOUT }}" + LOGGING_ROOT_OVERRIDES: "{{ ECOMMERCE_LOGGING_ROOT_OVERRIDES }}" + LOGGING_SUBSECTION_OVERRIDES: "{{ ECOMMERCE_LOGGING_SUBSECTION_OVERRIDES }}" + +ECOMMERCE_GUNICORN_WORKER_CLASS: "sync" +ECOMMERCE_GUNICORN_MAX_REQUESTS: 3000 
ECOMMERCE_GUNICORN_EXTRA: "" +ECOMMERCE_GUNICORN_WORKERS: 2 ECOMMERCE_CSRF_COOKIE_SECURE: false ECOMMERCE_SESSION_COOKIE_SECURE: true +# See edx_django_service_automated_users for an example of what this should be +ECOMMERCE_AUTOMATED_USERS: {} + +# Remote config +ECOMMERCE_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +ECOMMERCE_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +ECOMMERCE_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + # # vars are namespace with the module name. # @@ -214,7 +286,15 @@ ecommerce_create_demo_data: false ECOMMERCE_ENABLE_ANTIVIRUS: false ECOMMERCE_ANTIVIRUS_SCAN_DIRECTORY: "{{ ecommerce_code_dir }}" -ECOMMERCE_ENABLE_DJANGO_ADMIN_RESTRICTION: false +ECOMMERCE_ENABLE_ADMIN_URLS_RESTRICTION: false + +ECOMMERCE_ADMIN_URLS: + - admin + +ECOMMERCE_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +# Python 3 conversion +ECOMMERCE_USE_PYTHON3: true # # OS packages @@ -226,13 +306,15 @@ ecommerce_debian_pkgs: - libssl-dev - libffi-dev - libsqlite3-dev + - python-dev + - python3-dev ecommerce_redhat_pkgs: [] ecommerce_post_migrate_commands: - command: './manage.py oscar_populate_countries --initial-only' when: true - - command: './manage.py create_or_update_site --site-id=1 --site-domain={{ ECOMMERCE_ECOMMERCE_URL_ROOT.split("://")[1] }} --partner-code=edX --partner-name="Open edX" --lms-url-root={{ ECOMMERCE_LMS_URL_ROOT }} --client-side-payment-processor=cybersource --payment-processors=cybersource,paypal --client-id={{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_KEY }} --client-secret={{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_SECRET }} --from-email staff@example.com --discovery_api_url={{ ECOMMERCE_DISCOVERY_SERVICE_URL }}/api/v1/' + - command: './manage.py create_or_update_site --site-id=1 --site-domain={{ ECOMMERCE_ECOMMERCE_URL_ROOT.split("://")[1] }} --partner-code=edX --partner-name="Open edX" --lms-url-root={{ ECOMMERCE_LMS_URL_ROOT }} --client-side-payment-processor=cybersource --payment-processors=cybersource,paypal 
--sso-client-id={{ ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_KEY }} --sso-client-secret={{ ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_SECRET }} --backend-service-client-id={{ ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_KEY }} --backend-service-client-secret={{ ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_SECRET }} --from-email staff@example.com --discovery_api_url={{ ECOMMERCE_DISCOVERY_SERVICE_URL }}/api/v1/' when: '{{ ecommerce_create_demo_data }}' - command: './manage.py create_demo_data --partner=edX' when: '{{ ecommerce_create_demo_data }}' diff --git a/playbooks/roles/ecommerce/meta/main.yml b/playbooks/roles/ecommerce/meta/main.yml index bb747cc1a53..35237112abd 100644 --- a/playbooks/roles/ecommerce/meta/main.yml +++ b/playbooks/roles/ecommerce/meta/main.yml @@ -20,23 +20,37 @@ dependencies: edx_django_service_name: '{{ ecommerce_service_name }}' edx_django_service_config_overrides: '{{ ecommerce_service_config_overrides }}' edx_django_service_debian_pkgs_extra: '{{ ecommerce_debian_pkgs }}' - edx_django_service_gunicorn_port: '{{ ecommerce_gunicorn_port }}' edx_django_service_django_settings_module: '{{ ECOMMERCE_DJANGO_SETTINGS_MODULE }}' + edx_django_service_repos: '{{ ECOMMERCE_REPOS }}' edx_django_service_environment_extra: '{{ ecommerce_environment }}' edx_django_service_gunicorn_extra: '{{ ECOMMERCE_GUNICORN_EXTRA }}' + edx_django_service_gunicorn_port: '{{ ecommerce_gunicorn_port }}' + edx_django_service_gunicorn_worker_class: "{{ ECOMMERCE_GUNICORN_WORKER_CLASS }}" + edx_django_service_gunicorn_max_requests: "{{ ECOMMERCE_GUNICORN_MAX_REQUESTS }}" + edx_django_service_gunicorn_workers: "{{ ECOMMERCE_GUNICORN_WORKERS }}" edx_django_service_nginx_port: '{{ ECOMMERCE_NGINX_PORT }}' edx_django_service_ssl_nginx_port: '{{ ECOMMERCE_SSL_NGINX_PORT }}' - edx_django_service_use_python3: false + edx_django_service_use_python3: '{{ ECOMMERCE_USE_PYTHON3 }}' edx_django_service_language_code: '{{ ECOMMERCE_LANGUAGE_CODE }}' edx_django_service_secret_key: '{{ ECOMMERCE_SECRET_KEY }}' 
edx_django_service_memcache: '{{ ECOMMERCE_MEMCACHE }}' - edx_django_service_default_db_host: '{{ ECOMMERCE_DATABASE_HOST }}' - edx_django_service_default_db_name: '{{ ECOMMERCE_DATABASE_NAME }}' - edx_django_service_default_db_atomic_requests: true - edx_django_service_db_user: '{{ ECOMMERCE_DATABASE_USER }}' - edx_django_service_db_password: '{{ ECOMMERCE_DATABASE_PASSWORD }}' - edx_django_service_social_auth_edx_oidc_key: '{{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_KEY }}' - edx_django_service_social_auth_edx_oidc_secret: '{{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_SECRET }}' + edx_django_service_databases: + default: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ ECOMMERCE_DATABASE_NAME}}' + USER: '{{ ECOMMERCE_DATABASE_USER }}' + PASSWORD: '{{ ECOMMERCE_DATABASE_PASSWORD }}' + HOST: '{{ ECOMMERCE_DATABASE_HOST }}' + PORT: 3306 + ATOMIC_REQUESTS: true + CONN_MAX_AGE: '{{ ECOMMERCE_DATABASE_CONN_MAX_AGE }}' + OPTIONS: + connect_timeout: 10 + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" + edx_django_service_social_auth_edx_oauth2_key: '{{ ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' edx_django_service_social_auth_redirect_is_https: '{{ ECOMMERCE_SOCIAL_AUTH_REDIRECT_IS_HTTPS }}' edx_django_service_session_expire_at_browser_close: '{{ ECOMMERCE_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' edx_django_service_staticfiles_storage: '{{ ECOMMERCE_STATICFILES_STORAGE }}' @@ -44,8 +58,12 @@ dependencies: edx_django_service_basic_auth_exempted_paths_extra: - payment - \.well-known/apple-developer-merchantid-domain-association - edx_django_service_gunicorn_worker_class: "{{ ECOMMERCE_GUNICORN_WORKER_CLASS }}" - EDX_DJANGO_SERVICE_ENABLE_DJANGO_ADMIN_RESTRICTION: '{{ 
ECOMMERCE_ENABLE_DJANGO_ADMIN_RESTRICTION }}' + edx_django_service_automated_users: '{{ ECOMMERCE_AUTOMATED_USERS }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ ECOMMERCE_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_decrypt_config_enabled: '{{ ECOMMERCE_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ ECOMMERCE_COPY_CONFIG_ENABLED }}' + EDX_DJANGO_SERVICE_ENABLE_ADMIN_URLS_RESTRICTION: '{{ ECOMMERCE_ENABLE_ADMIN_URLS_RESTRICTION }}' + EDX_DJANGO_SERVICE_ADMIN_URLS: '{{ ECOMMERCE_ADMIN_URLS }}' - role: antivirus ANTIVIRUS_SCAN_DIRECTORY: "{{ ECOMMERCE_ANTIVIRUS_SCAN_DIRECTORY }}" when: ECOMMERCE_ENABLE_ANTIVIRUS diff --git a/playbooks/roles/ecomworker/defaults/main.yml b/playbooks/roles/ecomworker/defaults/main.yml index 67d4e1f025b..c7ad7d1173c 100644 --- a/playbooks/roles/ecomworker/defaults/main.yml +++ b/playbooks/roles/ecomworker/defaults/main.yml @@ -25,6 +25,7 @@ ECOMMERCE_WORKER_REPOS: # Requires that New Relic be enabled via COMMON_ENABLE_NEWRELIC, and that # a key be provided via NEWRELIC_LICENSE_KEY. 
ECOMMERCE_WORKER_NEWRELIC_APPNAME: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ ecommerce_worker_service_name }}' +ECOMMERCE_WORKER_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false # CELERY ECOMMERCE_WORKER_BROKER_USERNAME: 'celery' @@ -99,6 +100,12 @@ ECOMMERCE_WORKER_JWT_SECRET_KEY: 'insecure-secret-key' ECOMMERCE_WORKER_JWT_ISSUER: 'ecommerce_worker' ECOMMERCE_WORKER_SITE_OVERRIDES: !!null +# Remote config +ECOMMERCE_WORKER_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +ECOMMERCE_WORKER_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +ECOMMERCE_WORKER_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + ECOMMERCE_WORKER_SERVICE_CONFIG: BROKER_URL: '{{ ECOMMERCE_WORKER_BROKER_URL }}' ECOMMERCE_API_ROOT: '{{ ECOMMERCE_WORKER_ECOMMERCE_API_ROOT }}' @@ -120,6 +127,7 @@ ECOMMERCE_WORKER_SERVICE_CONFIG: SAILTHRU_MINIMUM_COST: '{{ ECOMMERCE_WORKER_SAILTHRU_MINIMUM_COST }}' templates: course_refund: 'Course Refund' + assignment_email: 'Offer Assignment Email' # Site-specific configuration overrides. Implemented as a dict of dicts with 'site_code' for keys. # Ecommerce worker will apply these settings instead of their corresponding default values. 
diff --git a/playbooks/roles/ecomworker/meta/main.yml b/playbooks/roles/ecomworker/meta/main.yml index 510808f0bca..ff307868647 100644 --- a/playbooks/roles/ecomworker/meta/main.yml +++ b/playbooks/roles/ecomworker/meta/main.yml @@ -22,4 +22,5 @@ dependencies: edx_service_packages: debian: "{{ ecommerce_worker_debian_pkgs }}" redhat: "{{ ecommerce_worker_redhat_pkgs }}" - + edx_service_decrypt_config_enabled: "{{ ECOMMERCE_WORKER_DECRYPT_CONFIG_ENABLED }}" + edx_service_copy_config_enabled: "{{ ECOMMERCE_WORKER_COPY_CONFIG_ENABLED }}" diff --git a/playbooks/roles/ecomworker/templates/edx/app/ecomworker/ecomworker.sh.j2 b/playbooks/roles/ecomworker/templates/edx/app/ecomworker/ecomworker.sh.j2 index 57c03c74b9a..d2c355d1c97 100644 --- a/playbooks/roles/ecomworker/templates/edx/app/ecomworker/ecomworker.sh.j2 +++ b/playbooks/roles/ecomworker/templates/edx/app/ecomworker/ecomworker.sh.j2 @@ -10,9 +10,11 @@ {% endif %} {% if COMMON_ENABLE_NEWRELIC_APP %} +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED='{{ ECOMMERCE_WORKER_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' export NEW_RELIC_APP_NAME='{{ ECOMMERCE_WORKER_NEWRELIC_APPNAME }}' export NEW_RELIC_LICENSE_KEY='{{ NEWRELIC_LICENSE_KEY }}' {% endif -%} source {{ ecommerce_worker_home }}/{{ ecommerce_worker_service_name }}_env -{{ executable }} -A ecommerce_worker worker --app ecommerce_worker.celery_app:app --concurrency={{ ECOMMERCE_WORKER_CONCURRENCY }} --loglevel=info --queue=fulfillment,email_marketing +# We exec so that celery is the child of supervisor and can be managed properly +exec {{ executable }} -A ecommerce_worker worker --app ecommerce_worker.celery_app:app --concurrency={{ ECOMMERCE_WORKER_CONCURRENCY }} --loglevel=info --hostname=ecomworker.%%h --queue=ecommerce.fulfillment,ecommerce.email_marketing diff --git a/playbooks/roles/edx_ansible/defaults/main.yml b/playbooks/roles/edx_ansible/defaults/main.yml index 153d57f737a..641462d4653 100644 --- a/playbooks/roles/edx_ansible/defaults/main.yml +++ 
b/playbooks/roles/edx_ansible/defaults/main.yml @@ -50,5 +50,6 @@ edx_ansible_requirements_files: - "{{ edx_ansible_code_dir }}/requirements.txt" # edX configuration repo -configuration_version: master +CONFIGURATION_VERSION: master +configuration_version: "{{ CONFIGURATION_VERSION }}" edx_ansible_var_file: "{{ edx_ansible_app_dir }}/server-vars.yml" diff --git a/playbooks/roles/edx_ansible/tasks/main.yml b/playbooks/roles/edx_ansible/tasks/main.yml index 8b276d22891..b58197b9dfc 100644 --- a/playbooks/roles/edx_ansible/tasks/main.yml +++ b/playbooks/roles/edx_ansible/tasks/main.yml @@ -47,10 +47,9 @@ - name: Install a bunch of system packages on which edx_ansible relies apt: - name: "{{ item }}" + name: "{{ edx_ansible_debian_pkgs }}" state: present update_cache: true - with_items: "{{ edx_ansible_debian_pkgs }}" tags: - install:system-requirements diff --git a/playbooks/roles/edx_ansible/templates/update.j2 b/playbooks/roles/edx_ansible/templates/update.j2 index aa5e871d218..0c7929a88c9 100644 --- a/playbooks/roles/edx_ansible/templates/update.j2 +++ b/playbooks/roles/edx_ansible/templates/update.j2 @@ -14,8 +14,9 @@ IFS="," -h this - must be one of edx-platform, edx-workers, xqueue, cs_comments_service, credentials, xserver, configuration, - read-only-certificate-code, edx-analytics-data-api, edx-ora2, insights, ecommerce, course_discovery, - notifier, video_web_frontend, video_delivery_worker, veda_pipeline_worker, video_encode_worker, veda_ffmpeg + read-only-certificate-code, edx-analytics-data-api, edx-ora2, insights, ecommerce, discovery, + notifier, video_web_frontend, video_delivery_worker, veda_pipeline_worker, video_encode_worker, veda_ffmpeg, + registrar, program_manager, learner_portal - can be a commit or tag - specify extra_vars to any of the ansible plays with the -e switch and then ecaptulating your vars in "double quotes" example: update -e "-e 'hallo=bye' -e 'bye=hallo'" @@ -59,7 +60,7 @@ edx_ansible_cmd="{{ edx_ansible_venv_bin 
}}/ansible-playbook -i localhost, -c lo repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'" repos_to_cmd["edx-workers"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2' -e 'celery_worker=true'" -repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2' -e 'elb_pre_post=false'" +repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'XQUEUE_VERSION=$2' -e 'elb_pre_post=false'" repos_to_cmd["credentials"]="$edx_ansible_cmd credentials.yml -e 'credentials_version=$2'" repos_to_cmd["cs_comments_service"]="$edx_ansible_cmd forum.yml -e 'forum_version=$2'" repos_to_cmd["xserver"]="$edx_ansible_cmd xserver.yml -e 'xserver_version=$2'" @@ -70,13 +71,15 @@ repos_to_cmd["edx-ora2"]="$edx_ansible_cmd ora2.yml -e 'ora2_version=$2'" repos_to_cmd["insights"]="$edx_ansible_cmd insights.yml -e 'INSIGHTS_VERSION=$2'" repos_to_cmd["ecommerce"]="$edx_ansible_cmd ecommerce.yml -e 'ECOMMERCE_VERSION=$2'" repos_to_cmd["discovery"]="$edx_ansible_cmd discovery.yml -e 'DISCOVERY_VERSION=$2'" -repos_to_cmd["journals"]="$edx_ansible_cmd journals.yml -e 'JOURNALS_VERSION=$2'" repos_to_cmd["notifier"]="$edx_ansible_cmd notifier.yml -e 'NOTIFIER_VERSION=$2'" repos_to_cmd["video_web_frontend"]="$edx_ansible_cmd veda_web_frontend.yml -e 'VEDA_WEB_FRONTEND_VERSION=$2'" repos_to_cmd["video_delivery_worker"]="$edx_ansible_cmd veda_delivery_worker.yml -e 'VEDA_DELIVERY_WORKER_VERSION=$2'" repos_to_cmd["veda_pipeline_worker"]="$edx_ansible_cmd veda_pipeline_worker.yml -e 'VEDA_PIPELINE_WORKER_VERSION=$2'" repos_to_cmd["video_encode_worker"]="$edx_ansible_cmd veda_encode_worker.yml -e 'VEDA_ENCODE_WORKER_VERSION=$2'" repos_to_cmd["veda_ffmpeg"]="$edx_ansible_cmd veda_ffmpeg.yml -e 'VEDA_FFMPEG_VERSION=$2'" +repos_to_cmd["registrar"]="$edx_ansible_cmd registrar.yml -e 'REGISTRAR_VERSION=$2'" +repos_to_cmd["learner_portal"]="$edx_ansible_cmd learner_portal.yml -e 'LEARNER_PORTAL_VERSION=$2'" +repos_to_cmd["program_manager"]="$edx_ansible_cmd 
program_manager.yml -e 'PROGRAM_MANAGER_VERSION=$2'" if [[ -z $1 || -z $2 ]]; then echo diff --git a/playbooks/roles/edx_django_service/defaults/main.yml b/playbooks/roles/edx_django_service/defaults/main.yml index 37d59599785..3ba23a912a7 100644 --- a/playbooks/roles/edx_django_service/defaults/main.yml +++ b/playbooks/roles/edx_django_service/defaults/main.yml @@ -23,6 +23,8 @@ edx_django_service_version: 'master' edx_django_service_git_identity: null edx_django_service_django_settings_module: null +edx_django_service_app_config_file: "{{ COMMON_CFG_DIR }}/{{ edx_django_service_name }}.yml" + edx_django_service_code_dir: '{{ edx_django_service_home }}/{{ edx_django_service_name }}' edx_django_service_venv_dir: '{{ edx_django_service_home }}/venvs/{{ edx_django_service_name }}' edx_django_service_venv_bin_dir: '{{ edx_django_service_venv_dir }}/bin' @@ -31,7 +33,8 @@ edx_django_service_nodeenv_dir: '{{ edx_django_service_home }}/nodeenvs/{{ edx_d edx_django_service_nodeenv_bin: '{{ edx_django_service_nodeenv_dir }}/bin' edx_django_service_node_modules_dir: '{{ edx_django_service_code_dir }}/node_modules' edx_django_service_node_bin: '{{ edx_django_service_node_modules_dir }}/.bin' -edx_django_service_node_version: '{{ common_node_version }}' +edx_django_service_node_version: '12.13.0' +edx_django_service_npm_version: '6.12.1' edx_django_service_environment_default: DJANGO_SETTINGS_MODULE: '{{ edx_django_service_django_settings_module }}' @@ -60,14 +63,19 @@ edx_django_service_gunicorn_port: null edx_django_service_gunicorn_timeout: 300 edx_django_service_gunicorn_workers: 2 edx_django_service_gunicorn_worker_class: 'gevent' +edx_django_service_gunicorn_max_requests: null edx_django_service_cors_whitelist: [] +edx_django_service_allow_cors_headers: false +edx_django_service_max_webserver_upload: !!null +edx_django_service_allow_cors_credentials: false nginx_edx_django_service_gunicorn_hosts: - 127.0.0.1 edx_django_service_hostname: '~^((stage|prod)-)?{{ 
edx_django_service_name }}.*' edx_django_service_nginx_port: '1{{ edx_django_service_gunicorn_port }}' +edx_django_service_nginx_read_timeout: !!null EDX_DJANGO_SERVICE_ENABLE_S3_MAINTENANCE: false EDX_DJANGO_SERVICE_S3_MAINTENANCE_FILE: "/edx-static/maintenance/maintenance.html" @@ -80,6 +88,7 @@ edx_django_service_basic_auth_exempted_paths_extra: [] edx_django_service_basic_auth_exempted_paths: '{{ edx_django_service_basic_auth_exempted_paths_default + edx_django_service_basic_auth_exempted_paths_extra }}' edx_django_service_newrelic_appname: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ edx_django_service_name }}' +edx_django_service_enable_newrelic_distributed_tracing: false edx_django_service_repos: - PROTOCOL: '{{ edx_django_service_git_protocol }}' @@ -116,6 +125,7 @@ edx_django_service_caches: edx_django_service_default_db_host: 'localhost' edx_django_service_default_db_name: '{{ edx_django_service_name }}' edx_django_service_default_db_atomic_requests: false +edx_django_service_default_db_conn_max_age: 60 edx_django_service_db_user: 'REPLACE-ME' edx_django_service_db_password: 'password' edx_django_service_db_options: @@ -131,17 +141,19 @@ edx_django_service_databases: HOST: '{{ edx_django_service_default_db_host }}' PORT: '3306' ATOMIC_REQUESTS: '{{ edx_django_service_default_db_atomic_requests }}' - CONN_MAX_AGE: 60 + CONN_MAX_AGE: '{{ edx_django_service_default_db_conn_max_age }}' OPTIONS: '{{ edx_django_service_db_options }}' -edx_django_service_social_auth_edx_oidc_key: '{{ edx_django_service_name }}-key' -edx_django_service_social_auth_edx_oidc_secret: '{{ edx_django_service_name }}-secret' +edx_django_service_social_auth_edx_oauth2_key: '{{ edx_django_service_name }}-sso-key' +edx_django_service_social_auth_edx_oauth2_secret: '{{ edx_django_service_name }}-sso-secret' +edx_django_service_backend_service_edx_oauth2_key: '{{ edx_django_service_name }}-backend-service-key' +edx_django_service_backend_service_edx_oauth2_secret: '{{ 
edx_django_service_name }}-backend-service-secret' edx_django_service_social_auth_redirect_is_https: false -edx_django_service_oauth_public_url_root: '{{ COMMON_OAUTH_PUBLIC_URL_ROOT }}' -edx_django_service_oauth_url_root: '{{COMMON_OAUTH_URL_ROOT }}' -edx_django_service_oidc_logout_url: '{{ COMMON_OAUTH_LOGOUT_URL }}' -edx_django_service_oidc_issuer: '{{ COMMON_OIDC_ISSUER }}' +edx_django_service_oauth2_url_root: '{{ COMMON_LMS_BASE_URL }}' +edx_django_service_oauth2_issuer: '{{ COMMON_LMS_BASE_URL }}' +edx_django_service_oauth2_logout_url: '{{ COMMON_OAUTH_LOGOUT_URL }}' +edx_django_service_oauth2_provider_url: '{{ COMMON_OAUTH_PUBLIC_URL_ROOT }}' edx_django_service_jwt_audience: '{{ COMMON_JWT_AUDIENCE }}' edx_django_service_jwt_issuer: '{{ COMMON_JWT_ISSUER }}' @@ -154,9 +166,15 @@ edx_django_service_jwt_auth: - AUDIENCE: '{{ edx_django_service_jwt_audience }}' ISSUER: '{{ edx_django_service_jwt_issuer }}' SECRET_KEY: '{{ edx_django_service_jwt_secret_key }}' + JWT_PUBLIC_SIGNING_JWK_SET: '{{ COMMON_JWT_PUBLIC_SIGNING_JWK_SET|string }}' + JWT_AUTH_COOKIE_HEADER_PAYLOAD: '{{ COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}' + JWT_AUTH_COOKIE_SIGNATURE: '{{ COMMON_JWT_AUTH_COOKIE_SIGNATURE }}' + JWT_AUTH_REFRESH_COOKIE: '{{ COMMON_JWT_AUTH_REFRESH_COOKIE }}' edx_django_service_extra_apps: [] +edx_django_service_api_root: !!null + edx_django_service_config_default: LANGUAGE_CODE: '{{ edx_django_service_language_code }}' SECRET_KEY: '{{ edx_django_service_secret_key }}' @@ -169,36 +187,34 @@ edx_django_service_config_default: CACHES: '{{ edx_django_service_caches }}' DATABASES: '{{ edx_django_service_databases }}' - SOCIAL_AUTH_EDX_OIDC_KEY: '{{ edx_django_service_social_auth_edx_oidc_key }}' - SOCIAL_AUTH_EDX_OIDC_SECRET: '{{ edx_django_service_social_auth_edx_oidc_secret }}' - SOCIAL_AUTH_EDX_OIDC_ID_TOKEN_DECRYPTION_KEY: '{{ edx_django_service_social_auth_edx_oidc_secret }}' - SOCIAL_AUTH_EDX_OIDC_URL_ROOT: '{{ edx_django_service_oauth_url_root }}' - 
SOCIAL_AUTH_EDX_OIDC_PUBLIC_URL_ROOT: '{{ edx_django_service_oauth_public_url_root }}' SOCIAL_AUTH_REDIRECT_IS_HTTPS: '{{ edx_django_service_social_auth_redirect_is_https }}' - SOCIAL_AUTH_EDX_OIDC_LOGOUT_URL: '{{ edx_django_service_oidc_logout_url }}' - SOCIAL_AUTH_EDX_OIDC_ISSUER: '{{ edx_django_service_oidc_issuer }}' + + SOCIAL_AUTH_EDX_OAUTH2_KEY: '{{ edx_django_service_social_auth_edx_oauth2_key }}' + SOCIAL_AUTH_EDX_OAUTH2_SECRET: '{{ edx_django_service_social_auth_edx_oauth2_secret }}' + SOCIAL_AUTH_EDX_OAUTH2_ISSUER: '{{ edx_django_service_oauth2_issuer }}' + SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: '{{ edx_django_service_oauth2_url_root }}' + SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: '{{ edx_django_service_oauth2_logout_url }}' + + BACKEND_SERVICE_EDX_OAUTH2_KEY: '{{ edx_django_service_backend_service_edx_oauth2_key }}' + BACKEND_SERVICE_EDX_OAUTH2_SECRET: '{{ edx_django_service_backend_service_edx_oauth2_secret }}' + BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: '{{ edx_django_service_oauth2_provider_url }}' JWT_AUTH: '{{ edx_django_service_jwt_auth }}' EXTRA_APPS: '{{ edx_django_service_extra_apps }}' EDX_DRF_EXTENSIONS: - OAUTH2_USER_INFO_URL: '{{ edx_django_service_oauth_url_root }}/user_info' + OAUTH2_USER_INFO_URL: '{{ edx_django_service_oauth2_url_root }}/user_info' SESSION_EXPIRE_AT_BROWSER_CLOSE: '{{ edx_django_service_session_expire_at_browser_close }}' + API_ROOT: '{{ edx_django_service_api_root }}' # NOTE: This should be overridden by inheriting service-specific role. 
edx_django_service_config_overrides: {} edx_django_service_config: '{{ edx_django_service_config_default|combine(edx_django_service_config_overrides) }}' -edx_django_service_automated_users: - automated_user: - sudo_commands: - - command: '{{ edx_django_service_venv_dir }}/python {{ edx_django_service_code_dir }}/manage.py showmigrations' - sudo_user: '{{ edx_django_service_user }}' - authorized_keys: - - 'SSH authorized key' +edx_django_service_automated_users: {} # This array contains commands that should be run after migration. # @@ -219,4 +235,25 @@ edx_django_service_automated_users: # edx_django_service_post_migrate_commands: [] -EDX_DJANGO_SERVICE_ENABLE_DJANGO_ADMIN_RESTRICTION: false +# This is a comma seperated list of services which will trigger checking migrations if they are found in the +# ec2 "services" tag. For most services this just needs to be the edx_django_service_name. In some cases it needs to be +# overidden, such as in the case of having workers. For example for edxapp it need to be "lms,cms,workers" and for +# enterprise_catalog it's "enterprise_catalog,enterprise_catalog_worker" +edx_django_service_migration_check_services: "{{ edx_django_service_name }}" + +EDX_DJANGO_SERVICE_ENABLE_ADMIN_URLS_RESTRICTION: false + +EDX_DJANGO_SERVICE_ADMIN_URLS: [] + +edx_django_service_enable_celery_workers: false +edx_django_service_workers: [] +edx_django_service_celery_heartbeat_enabled: true +edx_django_service_lang: 'en_US.UTF-8' +edx_django_service_default_stopwaitsecs: 432000 +edx_django_service_workers_supervisor_conf: '{{ edx_django_service_name }}-workers.conf' + +edx_django_service_enable_experimental_docker_shim: false +# Default to blank if edx_django_service_enable_experimental_docker_shim is false +edx_django_service_docker_run_command_make_migrate: "" +edx_django_service_docker_run_command_make_static: "" +edx_django_service_docker_image_name: 'openedx/{{ edx_django_service_name }}' diff --git 
a/playbooks/roles/edx_django_service/meta/main.yml b/playbooks/roles/edx_django_service/meta/main.yml index 80681557f0e..16d49c7807c 100644 --- a/playbooks/roles/edx_django_service/meta/main.yml +++ b/playbooks/roles/edx_django_service/meta/main.yml @@ -1,7 +1,19 @@ --- dependencies: - common - - supervisor + - role: docker-tools + when: edx_django_service_enable_experimental_docker_shim + docker_users: + - "www-data" + - "{{ supervisor_user }}" + - "{{ edx_django_service_user }}" + - role: supervisor + supervisor_spec: + - service: "{{ edx_django_service_name }}" + migration_check_services: "{{ edx_django_service_migration_check_services }}" + python: "python.{{ edx_django_service_name }}" + code: "{{ edx_django_service_code_dir }}" + env: "{{ edx_django_service_home }}/{{ edx_django_service_name }}_env" - role: automated AUTOMATED_USERS: "{{ edx_django_service_automated_users }}" - role: edx_service @@ -13,3 +25,5 @@ dependencies: edx_service_packages: debian: "{{ edx_django_service_debian_pkgs }}" redhat: [] + edx_service_decrypt_config_enabled: "{{ edx_django_service_decrypt_config_enabled }}" + edx_service_copy_config_enabled: "{{ edx_django_service_copy_config_enabled }}" diff --git a/playbooks/roles/edx_django_service/tasks/main.yml b/playbooks/roles/edx_django_service/tasks/main.yml index 905819bd0b3..f32794ccdfb 100644 --- a/playbooks/roles/edx_django_service/tasks/main.yml +++ b/playbooks/roles/edx_django_service/tasks/main.yml @@ -21,11 +21,42 @@ # # +- name: Get service user numeric id for docker + command: "id -u {{ edx_django_service_user }}" + register: edx_django_service_user_id + when: edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +- name: Docker build non-newrelic + when: edx_django_service_enable_experimental_docker_shim + command: docker build {{ edx_django_service_code_dir }} --target app -t {{ edx_django_service_docker_image_name }}:latest + args: + chdir: "{{ 
edx_django_service_code_dir }}" + become_user: "{{ edx_django_service_user }}" + environment: "{{ edx_django_service_environment }}" + tags: + - install + - install:app-requirements + +- name: Docker build newrelic + when: edx_django_service_enable_experimental_docker_shim and COMMON_ENABLE_NEWRELIC + command: docker build {{ edx_django_service_code_dir }} --target newrelic -t {{ edx_django_service_docker_image_name }}:latest-newrelic + args: + chdir: "{{ edx_django_service_code_dir }}" + become_user: "{{ edx_django_service_user }}" + environment: "{{ edx_django_service_environment }}" + tags: + - install + - install:app-requirements + - name: add gunicorn configuration file template: src: "edx/app/app/app_gunicorn.py.j2" dest: "{{ edx_django_service_home }}/{{ edx_django_service_name }}_gunicorn.py" become_user: "{{ edx_django_service_user }}" + when: not edx_django_service_enable_experimental_docker_shim tags: - install - install:configuration @@ -33,7 +64,7 @@ - name: install python3 apt: name: "{{ item }}" - when: edx_django_service_use_python3 + when: edx_django_service_use_python3 and not edx_django_service_enable_experimental_docker_shim with_items: - python3-pip - python3-dev @@ -46,7 +77,7 @@ args: creates: "{{ edx_django_service_venv_dir }}/bin/pip" become_user: "{{ edx_django_service_user }}" - when: edx_django_service_use_python3 + when: edx_django_service_use_python3 and not edx_django_service_enable_experimental_docker_shim tags: - install - install:system-requirements @@ -56,7 +87,7 @@ args: creates: "{{ edx_django_service_venv_dir }}/bin/pip" become_user: "{{ edx_django_service_user }}" - when: not edx_django_service_use_python3 + when: not edx_django_service_use_python3 and not edx_django_service_enable_experimental_docker_shim tags: - install - install:system-requirements @@ -64,6 +95,7 @@ - name: Pin pip to a specific version. 
command: "{{ edx_django_service_venv_dir }}/bin/pip install pip=={{ common_pip_version }}" become_user: "{{ edx_django_service_user }}" + when: not edx_django_service_enable_experimental_docker_shim tags: - install - install:system-requirements @@ -73,6 +105,7 @@ - name: install nodenv command: pip install nodeenv become_user: "{{ edx_django_service_user }}" + when: not edx_django_service_enable_experimental_docker_shim environment: "{{ edx_django_service_environment }}" tags: - install @@ -83,16 +116,28 @@ args: creates: "{{ edx_django_service_nodeenv_dir }}" become_user: "{{ edx_django_service_user }}" + when: not edx_django_service_enable_experimental_docker_shim environment: "{{ edx_django_service_environment }}" tags: - install - install:system-requirements +- name: upgrade npm + command: "npm install -g npm@{{ edx_django_service_npm_version }}" + become_user: "{{ edx_django_service_user }}" + # NOTE: duplicate 'when' key removed; the 'when' below already includes the docker-shim check + environment: "{{ edx_django_service_environment }}" + tags: + - install + - install:system-requirements + when: edx_django_service_npm_version is defined and not edx_django_service_enable_experimental_docker_shim + - name: install production requirements command: make production-requirements args: chdir: "{{ edx_django_service_code_dir }}" become_user: "{{ edx_django_service_user }}" + when: not edx_django_service_enable_experimental_docker_shim environment: "{{ edx_django_service_environment }}" tags: - install @@ -104,15 +149,35 @@ chdir: "{{ edx_django_service_code_dir }}" become_user: "{{ edx_django_service_user }}" environment: "{{ edx_django_service_environment }}" - when: edx_django_service_is_devstack is defined and edx_django_service_is_devstack + when: edx_django_service_is_devstack is defined and edx_django_service_is_devstack and not edx_django_service_enable_experimental_docker_shim tags: - install - install:app-requirements - devstack - devstack:install +- name: Check for existing make_migrate 
container + command: "docker ps -aq --filter name='{{ edx_django_service_name }}.make_migrate'" + register: edx_django_service_make_migrate_container + when: edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +- name: Delete existing make_migrate container + when: edx_django_service_enable_experimental_docker_shim and edx_django_service_make_migrate_container.stdout != "" + command: "docker rm {{ edx_django_service_make_migrate_container.stdout }}" + +- name: Set edx_django_service_docker_run_command_make_migrate if docker shim enabled + set_fact: + edx_django_service_docker_run_command_make_migrate: "docker run --user root --name {{ edx_django_service_name }}.make_migrate --env DB_MIGRATION_USER --env DB_MIGRATION_PASS --mount type=bind,src={{ edx_django_service_app_config_file }},dst={{ edx_django_service_app_config_file }} {{ edx_django_service_docker_image_name }}:latest" + when: edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + - name: migrate database - command: make migrate + command: "{{ edx_django_service_docker_run_command_make_migrate }} make migrate" args: chdir: "{{ edx_django_service_code_dir }}" become_user: "{{ edx_django_service_user }}" @@ -130,7 +195,7 @@ become_user: "{{ edx_django_service_user }}" environment: "{{ edx_django_service_environment }}" with_items: '{{ edx_django_service_post_migrate_commands }}' - when: migrate_db is defined and migrate_db|lower == "yes" and item.when | bool + when: migrate_db is defined and migrate_db|lower == "yes" and item.when|bool and not edx_django_service_enable_experimental_docker_shim run_once: yes tags: - migrate @@ -159,6 +224,18 @@ - install - install:configuration +- name: write out the celery workers supervisor wrapper + when: edx_django_service_enable_celery_workers + template: + src: "edx/app/app/app-workers.sh.j2" + dest: "{{ edx_django_service_home }}/{{ edx_django_service_name 
}}-workers.sh" + mode: 0650 + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + tags: + - install + - install:configuration + - name: write supervisord config template: src: "edx/app/supervisor/conf.d.available/app.conf.j2" @@ -170,6 +247,18 @@ - install - install:configuration +- name: write celery workers supervisord config + when: edx_django_service_enable_celery_workers + template: + src: "edx/app/supervisor/conf.d.available/app-workers.conf.j2" + dest: "{{ supervisor_available_dir }}/{{ edx_django_service_workers_supervisor_conf }}" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0644 + tags: + - install + - install:configuration + - name: write devstack script template: src: "edx/app/app/devstack.sh.j2" @@ -177,7 +266,7 @@ owner: "{{ supervisor_user }}" group: "{{ common_web_user }}" mode: 0744 - when: edx_django_service_is_devstack is defined and edx_django_service_is_devstack + when: edx_django_service_is_devstack is defined and edx_django_service_is_devstack and not edx_django_service_enable_experimental_docker_shim tags: - devstack - devstack:install @@ -189,6 +278,7 @@ owner: "{{ edx_django_service_user }}" group: "{{ edx_django_service_user }}" mode: 0644 + when: not edx_django_service_enable_experimental_docker_shim tags: - install - install:configuration @@ -216,6 +306,7 @@ src: "{{ edx_django_service_venv_dir }}/bin/{{ item }}" dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ edx_django_service_name }}" state: link + when: not edx_django_service_enable_experimental_docker_shim with_items: - python - pip @@ -229,14 +320,35 @@ src: "{{ edx_django_service_code_dir }}/{{ item }}" dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ edx_django_service_name }}" state: link + when: not edx_django_service_enable_experimental_docker_shim with_items: - manage.py tags: - install - install:app-requirements +- name: Check for existing make_static container + command: "docker ps -aq --filter name='{{ 
edx_django_service_name }}.make_static'" + register: edx_django_service_make_static_container + when: edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + +- name: Delete existing make_static container + when: edx_django_service_enable_experimental_docker_shim and edx_django_service_make_static_container.stdout != "" + command: "docker rm {{ edx_django_service_make_static_container.stdout }}" + +- name: Set edx_django_service_docker_run_command_make_static if docker shim enabled + set_fact: + edx_django_service_docker_run_command_make_static: "docker run --user root --name {{ edx_django_service_name }}.make_static --mount type=bind,src={{ edx_django_service_static_root }},dst={{ edx_django_service_static_root }} --mount type=bind,src={{ edx_django_service_app_config_file }},dst={{ edx_django_service_app_config_file }} {{ edx_django_service_docker_image_name }}:latest" + when: edx_django_service_enable_experimental_docker_shim + tags: + - install + - install:system-requirements + - name: compile static assets - command: make static + command: "{{ edx_django_service_docker_run_command_make_static }} make static" args: chdir: "{{ edx_django_service_code_dir }}" become_user: "{{ edx_django_service_user }}" @@ -283,3 +395,15 @@ tags: - install - install:vhosts + +#TODO jdmulloy docker build +- name: Include JWT signature setting in the app config file + include_role: + name: jwt_signature + when: CONFIGURE_JWTS + vars: + app_name: '{{ edx_django_service_name }}' + app_config_file: "{{ edx_django_service_app_config_file }}" + app_config_owner: root + app_config_group: root + app_config_mode: 0644 diff --git a/playbooks/roles/edx_django_service/templates/edx/app/app/app-workers.sh.j2 b/playbooks/roles/edx_django_service/templates/edx/app/app/app-workers.sh.j2 new file mode 100644 index 00000000000..f1dd4d1b37d --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/app/app-workers.sh.j2 @@ -0,0 +1,59 @@ 
+#!/usr/bin/env bash + +# {{ ansible_managed }} + +{% set edx_django_service_venv_bin = edx_django_service_venv_dir + "/bin" %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = edx_django_service_venv_bin + '/newrelic-admin run-program ' + edx_django_service_venv_bin + '/celery' %} +{% else %} +{% set executable = edx_django_service_venv_bin + '/celery' %} +{% endif %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +HOSTNAME=$(hostname) +export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME" +if command -v ec2metadata >/dev/null 2>&1; then + INSTANCEID=$(ec2metadata --instance-id); + export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID" +fi +{% endif %} + +source {{ edx_django_service_home }}/{{ edx_django_service_name }}_env + +{% if edx_django_service_enable_experimental_docker_shim %} + +OLD_CONTAINERS=$(docker ps -aq --filter name="^{{ edx_django_service_name }}-worker-${DOCKER_WORKER_QUEUE}-${DOCKER_WORKER_NUM}\$") + +# Delete old container so we can reuse the container name +if [[ -n "${OLD_CONTAINERS}" ]]; then + docker rm ${OLD_CONTAINERS} > /dev/null 2>&1 +fi + +# We exec so that celery is the child of supervisor and can be managed properly +exec docker run \ + --name {{ edx_django_service_name }}-worker-${DOCKER_WORKER_QUEUE}-${DOCKER_WORKER_NUM} \ + --mount type=bind,src={{ edx_django_service_app_config_file }},dst={{ edx_django_service_app_config_file }} \ +{% if COMMON_ENABLE_NEWRELIC_APP %} + --env NEW_RELIC_DISTRIBUTED_TRACING_ENABLED \ + --env NEW_RELIC_APP_NAME \ + --env NEW_RELIC_PROCESS_HOST_DISPLAY_NAME \ + --env NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" \ +{% endif -%} +{% for name,value in edx_django_service_environment.items() -%} +{%- if value %} + --env {{ name }} \ +{% endif %} +{%- endfor %} +{% if COMMON_ENABLE_NEWRELIC_APP %} + {{ edx_django_service_docker_image_name }}:latest-newrelic \ + newrelic-admin run-program \ +{% else %} + {{ edx_django_service_docker_image_name }}:latest \ +{% endif %} + celery \ + $@ 
+{% else %} +# We exec so that celery is the child of supervisor and can be managed properly +exec {{ executable }} $@ +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/app/app.sh.j2 b/playbooks/roles/edx_django_service/templates/edx/app/app/app.sh.j2 index 274d2016f83..f77a68f132f 100644 --- a/playbooks/roles/edx_django_service/templates/edx/app/app/app.sh.j2 +++ b/playbooks/roles/edx_django_service/templates/edx/app/app/app.sh.j2 @@ -11,14 +11,50 @@ {% endif %} {% if COMMON_ENABLE_NEWRELIC_APP %} +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED="{{ edx_django_service_enable_newrelic_distributed_tracing }}" export NEW_RELIC_APP_NAME="{{ edx_django_service_newrelic_appname }}" +HOSTNAME=$(hostname) +export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME" if command -v ec2metadata >/dev/null 2>&1; then INSTANCEID=$(ec2metadata --instance-id); - HOSTNAME=$(hostname) export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID" fi export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" {% endif -%} +export EDX_REST_API_CLIENT_NAME="{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ edx_django_service_name }}" + source {{ edx_django_service_home }}/{{ edx_django_service_name }}_env -{{ executable }} -c {{ edx_django_service_home }}/{{ edx_django_service_name }}_gunicorn.py {{ edx_django_service_gunicorn_extra }} {{ edx_django_service_wsgi_name }}.wsgi:application + +{% if edx_django_service_enable_experimental_docker_shim %} + +OLD_CONTAINERS=$(docker ps -aq --filter name='^{{ edx_django_service_name }}$') + +# Delete old container so we can reuse the container name +if [[ -n "${OLD_CONTAINERS}" ]]; then + docker rm ${OLD_CONTAINERS} > /dev/null 2>&1 +fi + +exec docker run \ + --name {{ edx_django_service_name }} \ + --mount type=bind,src={{ edx_django_service_app_config_file }},dst={{ edx_django_service_app_config_file }} \ + --publish 127.0.0.1:{{ edx_django_service_gunicorn_port }}:{{ edx_django_service_gunicorn_port }}/tcp \ +{% if 
COMMON_ENABLE_NEWRELIC_APP %} + --env NEW_RELIC_DISTRIBUTED_TRACING_ENABLED \ + --env NEW_RELIC_APP_NAME \ + --env NEW_RELIC_PROCESS_HOST_DISPLAY_NAME \ + --env NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" \ +{% endif -%} +{% for name,value in edx_django_service_environment.items() -%} +{%- if value %} + --env {{ name }} \ +{% endif %} +{%- endfor %} +{% if COMMON_ENABLE_NEWRELIC_APP %} + {{ edx_django_service_docker_image_name }}:latest-newrelic +{% else %} + {{ edx_django_service_docker_image_name }}:latest +{% endif %} +{% else %} +exec {{ executable }} -c {{ edx_django_service_home }}/{{ edx_django_service_name }}_gunicorn.py {{ edx_django_service_gunicorn_extra }} {{ edx_django_service_wsgi_name }}.wsgi:application +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/app/app_gunicorn.py.j2 b/playbooks/roles/edx_django_service/templates/edx/app/app/app_gunicorn.py.j2 index 2376f56b32f..f9780ceaf3b 100644 --- a/playbooks/roles/edx_django_service/templates/edx/app/app/app_gunicorn.py.j2 +++ b/playbooks/roles/edx_django_service/templates/edx/app/app/app_gunicorn.py.j2 @@ -9,4 +9,10 @@ pythonpath = "{{ edx_django_service_code_dir }}" workers = {{ edx_django_service_gunicorn_workers }} worker_class = "{{ edx_django_service_gunicorn_worker_class }}" +{% if edx_django_service_gunicorn_max_requests -%} +max_requests = {{ edx_django_service_gunicorn_max_requests }} +{% endif %} + {{ edx_django_service_gunicorn_extra_conf }} + +{{ common_pre_request }} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/app/devstack.sh.j2 b/playbooks/roles/edx_django_service/templates/edx/app/app/devstack.sh.j2 index ce98b9ce474..e31b87dc5e1 100644 --- a/playbooks/roles/edx_django_service/templates/edx/app/app/devstack.sh.j2 +++ b/playbooks/roles/edx_django_service/templates/edx/app/app/devstack.sh.j2 @@ -6,6 +6,9 @@ source {{ edx_django_service_home }}/{{ edx_django_service_name }}_env COMMAND=$1 case $COMMAND in + start) + 
/edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; open) . {{ edx_django_service_nodeenv_bin }}/activate . {{ edx_django_service_venv_bin_dir }}/activate @@ -13,4 +16,16 @@ case $COMMAND in /bin/bash ;; + exec) + shift + + . {{ edx_django_service_nodeenv_bin }}/activate + . {{ edx_django_service_venv_bin_dir }}/activate + cd {{ edx_django_service_code_dir }} + + "$@" + ;; + *) + "$@" + ;; esac diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/admin_urls_access_from_restricted_cidrs.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/admin_urls_access_from_restricted_cidrs.j2 new file mode 100644 index 00000000000..52d79ba7913 --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/admin_urls_access_from_restricted_cidrs.j2 @@ -0,0 +1,11 @@ +{% if NGINX_ADMIN_ACCESS_CIDRS and EDX_DJANGO_SERVICE_ENABLE_ADMIN_URLS_RESTRICTION %} + location ~ ^/({{ EDX_DJANGO_SERVICE_ADMIN_URLS|join("|") }}) { + real_ip_header X-Forwarded-For; + set_real_ip_from {{ NGINX_TRUSTED_IP_CIDRS }}; + {% for cidr in NGINX_ADMIN_ACCESS_CIDRS %} + allow {{ cidr }}; + {% endfor %} + deny all; + try_files $uri @proxy_to_app; + } +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/app-common.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/app-common.j2 index a15c06af036..83e503dab64 100644 --- a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/app-common.j2 +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/app-common.j2 @@ -2,4 +2,9 @@ {% include "concerns/s3_maintenance.j2" %} {% include "concerns/static-assets.j2" %} {% include "concerns/proxy-to-app.j2" %} - +{% if edx_django_service_max_webserver_upload %} + client_max_body_size {{ 
edx_django_service_max_webserver_upload }}M; +{% endif %} +{% if edx_django_service_nginx_read_timeout %} + proxy_read_timeout {{ edx_django_service_nginx_read_timeout }}; +{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 index 3f181418a8d..8c693f4f9ca 100644 --- a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 @@ -1 +1,16 @@ -add_header 'Access-Control-Allow-Origin' $cors_origin; + if ($request_method = 'OPTIONS') { + add_header 'Access-Control-Allow-Origin' $cors_origin; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; + add_header 'Access-Control-Allow-Headers' 'Authorization, USE-JWT-COOKIE'; + {% if edx_django_service_allow_cors_credentials %} + add_header 'Access-Control-Allow-Credentials' true; + {% endif %} + add_header 'Access-Control-Max-Age' 86400; + add_header 'Content-Type' 'text/plain; charset=utf-8'; + add_header 'Content-Length' 0; + return 204; + } + + add_header 'Access-Control-Allow-Origin' $cors_origin always; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always; + add_header 'Access-Control-Allow-Credentials' true always; diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/django_admin_access_from_restricted_cidrs.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/django_admin_access_from_restricted_cidrs.j2 deleted file mode 100644 index d134f6102b8..00000000000 --- a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/django_admin_access_from_restricted_cidrs.j2 +++ /dev/null @@ -1,9 +0,0 @@ -{% if NGINX_DJANGO_ADMIN_ACCESS_CIDRS and 
EDX_DJANGO_SERVICE_ENABLE_DJANGO_ADMIN_RESTRICTION %} - location /admin { - {% for cidr in NGINX_DJANGO_ADMIN_ACCESS_CIDRS %} - allow {{ cidr }}; - {% endfor %} - deny all; - try_files $uri @proxy_to_app; - } -{% endif %} diff --git a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/proxy-to-app.j2 b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/proxy-to-app.j2 index 51d130eed74..2271dbbf082 100644 --- a/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/proxy-to-app.j2 +++ b/playbooks/roles/edx_django_service/templates/edx/app/nginx/sites-available/concerns/proxy-to-app.j2 @@ -10,9 +10,14 @@ location ~ ^/({{ edx_django_service_basic_auth_exempted_paths | join('|') }})/ { {% include 'concerns/robots.j2' %} -{% include "concerns/django_admin_access_from_restricted_cidrs.j2" %} +{% include "concerns/admin_urls_access_from_restricted_cidrs.j2" %} location @proxy_to_app { + +{% if edx_django_service_allow_cors_headers %} + {% include 'concerns/cors-add-header.j2' %} +{% endif %} + {% if NGINX_SET_X_FORWARDED_HEADERS %} proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Port $server_port; diff --git a/playbooks/roles/edx_django_service/templates/edx/app/supervisor/conf.d.available/app-workers.conf.j2 b/playbooks/roles/edx_django_service/templates/edx/app/supervisor/conf.d.available/app-workers.conf.j2 new file mode 100644 index 00000000000..368ad2a092a --- /dev/null +++ b/playbooks/roles/edx_django_service/templates/edx/app/supervisor/conf.d.available/app-workers.conf.j2 @@ -0,0 +1,50 @@ +{% if edx_django_service_enable_experimental_docker_shim %} +{% for w in edx_django_service_workers %} +{% for count in range(1, 1 + w.concurrency) %} +[program:{{ w.queue }}_{{ count }}] + +environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ edx_django_service_newrelic_appname }}-workers,NEW_RELIC_DISTRIBUTED_TRACING_ENABLED={{ 
edx_django_service_enable_newrelic_distributed_tracing }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}CONCURRENCY=1,LOGLEVEL=info,DJANGO_SETTINGS_MODULE={{ worker_django_settings_module }},LANG={{ edx_django_service_lang }},PYTHONPATH={{ edx_django_service_code_dir }},BOTO_CONFIG="{{ edx_django_service_home }}/.boto",EDX_REST_API_CLIENT_NAME={{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ edx_django_service_name }}-worker-{{ w.queue }},DOCKER_WORKER_QUEUE={{ w.queue }},DOCKER_WORKER_NUM={{ count }} + +user={{ common_web_user }} +directory={{ edx_django_service_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log + +command={{ edx_django_service_home }}/{{ edx_django_service_name }}-workers.sh worker -A {{ edx_django_service_name }} --app {{ edx_django_service_name }}.celery:app --loglevel=info --queue={{ w.queue }} --hostname={{ edx_django_service_name }}.{{ w.queue }}.%%h --concurrency=1 {{ '--maxtasksperchild ' + w.max_tasks_per_child|string if w.max_tasks_per_child is defined else '' }} {{ '--without-heartbeat' if not edx_django_service_celery_heartbeat_enabled|bool else '' }} +killasgroup=true +stopwaitsecs={{ w.stopwaitsecs | default(edx_django_service_default_stopwaitsecs) }} +; Set autorestart to `true`. The default value for autorestart is `unexpected`, but celery < 4.x will exit +; with an exit code of zero for certain types of unrecoverable errors, so we must make sure that the workers +; are auto restarted even when exiting with code 0. +; The Celery bug was reported in https://github.com/celery/celery/issues/2024, and is fixed in Celery 4.0.0. 
+autorestart=true + +{% endfor %} +{% endfor %} + +[group:{{ edx_django_service_name }}_workers] +programs={%- for w in edx_django_service_workers %}{%- for c in range(1, 1 + w.concurrency) %}{{ w.queue }}_{{ c }}{%- if not loop.last %},{%- endif %}{%- endfor %}{%- if not loop.last %},{%- endif %}{%- endfor %} +{% else %} +{% for w in edx_django_service_workers %} +[program:{{ w.queue }}_{{ w.concurrency }}] + +environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ edx_django_service_newrelic_appname }}-workers,NEW_RELIC_DISTRIBUTED_TRACING_ENABLED={{ edx_django_service_enable_newrelic_distributed_tracing }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}CONCURRENCY={{ w.concurrency }},LOGLEVEL=info,DJANGO_SETTINGS_MODULE={{ worker_django_settings_module }},LANG={{ edx_django_service_lang }},PYTHONPATH={{ edx_django_service_code_dir }},BOTO_CONFIG="{{ edx_django_service_home }}/.boto",EDX_REST_API_CLIENT_NAME={{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ edx_django_service_name }}-worker-{{ w.queue }} +user={{ common_web_user }} +directory={{ edx_django_service_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log + +command={{ edx_django_service_home }}/{{ edx_django_service_name }}-workers.sh worker -A {{ edx_django_service_name }} --app {{ edx_django_service_name }}.celery:app --loglevel=info --queue={{ w.queue }} --hostname={{ edx_django_service_name }}.{{ w.queue }}.%%h --concurrency={{ w.concurrency }} {{ '--maxtasksperchild ' + w.max_tasks_per_child|string if w.max_tasks_per_child is defined else '' }} {{ '--without-heartbeat' if not edx_django_service_celery_heartbeat_enabled|bool else '' }} +killasgroup=true +stopwaitsecs={{ w.stopwaitsecs | default(edx_django_service_default_stopwaitsecs) }} +; Set autorestart to `true`. 
The default value for autorestart is `unexpected`, but celery < 4.x will exit +; with an exit code of zero for certain types of unrecoverable errors, so we must make sure that the workers +; are auto restarted even when exiting with code 0. +; The Celery bug was reported in https://github.com/celery/celery/issues/2024, and is fixed in Celery 4.0.0. +autorestart=true + +{% endfor %} + +[group:{{ edx_django_service_name }}_workers] +programs={%- for w in edx_django_service_workers %}{{ w.queue }}_{{ w.concurrency }}{%- if not loop.last %},{%- endif %}{%- endfor %} +{% endif %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/defaults/main.yml b/playbooks/roles/edx_django_service_with_rendered_config/defaults/main.yml new file mode 100644 index 00000000000..03d5e4f3988 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/defaults/main.yml @@ -0,0 +1,244 @@ +--- +edx_django_service_with_rendered_config_service_name: ' NOT-SET ' +edx_django_service_with_rendered_config_repo: '{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_home: '{{ COMMON_APP_DIR }}/{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_user: '{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_use_python3: true + +# This should be overwritten at the time Ansible is run. 
+edx_django_service_with_rendered_config_is_devstack: false + +edx_django_service_with_rendered_config_has_static_assets: true + +edx_django_service_with_rendered_config_wsgi_name: '{{ edx_django_service_with_rendered_config_service_name }}' + +edx_django_service_with_rendered_config_name_devstack_logs: + - '{{ supervisor_log_dir }}/{{ edx_django_service_with_rendered_config_service_name }}-stdout.log' + - '{{ supervisor_log_dir }}/{{ edx_django_service_with_rendered_config_service_name }}-stderr.log' + +edx_django_service_with_rendered_config_git_protocol: '{{ COMMON_GIT_PROTOCOL }}' +edx_django_service_with_rendered_config_git_domain: '{{ COMMON_GIT_MIRROR }}' +edx_django_service_with_rendered_config_git_path: '{{ COMMON_GIT_PATH }}' +edx_django_service_with_rendered_config_version: 'master' +edx_django_service_with_rendered_config_git_identity: null +edx_django_service_with_rendered_config_django_settings_module: null + +edx_django_service_with_rendered_config_code_dir: '{{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_venv_dir: '{{ edx_django_service_with_rendered_config_home }}/venvs/{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_venv_bin_dir: '{{ edx_django_service_with_rendered_config_venv_dir }}/bin' + +edx_django_service_with_rendered_config_nodeenv_dir: '{{ edx_django_service_with_rendered_config_home }}/nodeenvs/{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_nodeenv_bin: '{{ edx_django_service_with_rendered_config_nodeenv_dir }}/bin' +edx_django_service_with_rendered_config_node_modules_dir: '{{ edx_django_service_with_rendered_config_code_dir }}/node_modules' +edx_django_service_with_rendered_config_node_bin: '{{ edx_django_service_with_rendered_config_node_modules_dir }}/.bin' +edx_django_service_with_rendered_config_node_version: '12.11.1' + 
+edx_django_service_with_rendered_config_environment_default: + DJANGO_SETTINGS_MODULE: '{{ edx_django_service_with_rendered_config_django_settings_module }}' + PATH: '{{ edx_django_service_with_rendered_config_nodeenv_bin }}:{{ edx_django_service_with_rendered_config_venv_dir }}/bin:{{ ansible_env.PATH }}' +edx_django_service_with_rendered_config_environment_extra: {} +edx_django_service_with_rendered_config_environment: '{{ edx_django_service_with_rendered_config_environment_default | combine(edx_django_service_with_rendered_config_environment_extra) }}' + +edx_django_service_with_rendered_config_migration_environment_default: + DB_MIGRATION_USER: '{{ COMMON_MYSQL_MIGRATE_USER }}' + DB_MIGRATION_PASS: '{{ COMMON_MYSQL_MIGRATE_PASS }}' +edx_django_service_with_rendered_config_migration_environment: '{{ edx_django_service_with_rendered_config_environment|combine(edx_django_service_with_rendered_config_migration_environment_default) }}' + +edx_django_service_with_rendered_config_debian_pkgs_default: + - gettext + - libffi-dev # Needed to install the Python cryptography library for asymmetric JWT signing + - libmemcached-dev + - libmysqlclient-dev + - libssl-dev +edx_django_service_with_rendered_config_debian_pkgs_extra: [] +edx_django_service_with_rendered_config_debian_pkgs: '{{ edx_django_service_with_rendered_config_debian_pkgs_default + edx_django_service_with_rendered_config_debian_pkgs_extra }}' + +edx_django_service_with_rendered_config_gunicorn_extra: '' +edx_django_service_with_rendered_config_gunicorn_extra_conf: '' +edx_django_service_with_rendered_config_gunicorn_host: '127.0.0.1' +edx_django_service_with_rendered_config_gunicorn_port: null +edx_django_service_with_rendered_config_gunicorn_timeout: 300 +edx_django_service_with_rendered_config_gunicorn_workers: 2 +edx_django_service_with_rendered_config_gunicorn_worker_class: 'gevent' +edx_django_service_with_rendered_config_gunicorn_max_requests: null + 
+edx_django_service_with_rendered_config_cors_whitelist: [] +edx_django_service_with_rendered_config_allow_cors_headers: false +edx_django_service_with_rendered_config_max_webserver_upload: !!null +edx_django_service_with_rendered_config_allow_cors_credentials: false + +nginx_edx_django_service_with_rendered_config_gunicorn_hosts: + - 127.0.0.1 + +edx_django_service_with_rendered_config_hostname: '~^((stage|prod)-)?{{ edx_django_service_with_rendered_config_service_name }}.*' +edx_django_service_with_rendered_config_nginx_port: '1{{ edx_django_service_with_rendered_config_gunicorn_port }}' +edx_django_service_with_rendered_config_nginx_read_timeout: !!null + +EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ENABLE_S3_MAINTENANCE: false +EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_S3_MAINTENANCE_FILE: "/edx-static/maintenance/maintenance.html" + +edx_django_service_with_rendered_config_ssl_nginx_port: '4{{ edx_django_service_with_rendered_config_gunicorn_port }}' +edx_django_service_with_rendered_config_enable_basic_auth: false +edx_django_service_with_rendered_config_basic_auth_exempted_paths_default: + - api +edx_django_service_with_rendered_config_basic_auth_exempted_paths_extra: [] +edx_django_service_with_rendered_config_basic_auth_exempted_paths: '{{ edx_django_service_with_rendered_config_basic_auth_exempted_paths_default + edx_django_service_with_rendered_config_basic_auth_exempted_paths_extra }}' + +edx_django_service_with_rendered_config_newrelic_appname: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_enable_newrelic_distributed_tracing: false + +edx_django_service_with_rendered_config_repos: + - PROTOCOL: '{{ edx_django_service_with_rendered_config_git_protocol }}' + DOMAIN: '{{ edx_django_service_with_rendered_config_git_domain }}' + PATH: '{{ edx_django_service_with_rendered_config_git_path }}' + REPO: '{{ edx_django_service_with_rendered_config_repo }}.git' + 
VERSION: '{{ edx_django_service_with_rendered_config_version }}' + DESTINATION: '{{ edx_django_service_with_rendered_config_code_dir }}' + SSH_KEY: '{{ edx_django_service_with_rendered_config_git_identity }}' + +edx_django_service_with_rendered_config_secret_key: null +edx_django_service_with_rendered_config_language_code: 'en-us' + +edx_django_service_with_rendered_config_data_dir: '{{ COMMON_DATA_DIR }}/{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_media_root: '{{ edx_django_service_with_rendered_config_data_dir }}/media' +edx_django_service_with_rendered_config_media_url: '/media/' + +edx_django_service_with_rendered_config_static_root: '{{ COMMON_DATA_DIR }}/{{ edx_django_service_with_rendered_config_service_name }}/staticfiles' +edx_django_service_with_rendered_config_staticfiles_storage: 'django.contrib.staticfiles.storage.StaticFilesStorage' + +edx_django_service_with_rendered_config_media_storage_backend: + DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' + MEDIA_ROOT: '{{ edx_django_service_with_rendered_config_media_root }}' + MEDIA_URL: '{{ edx_django_service_with_rendered_config_media_url }}' + +edx_django_service_with_rendered_config_memcache: [ 'memcache' ] + +edx_django_service_with_rendered_config_caches: + default: + BACKEND: 'django.core.cache.backends.memcached.MemcachedCache' + KEY_PREFIX: '{{ edx_django_service_with_rendered_config_service_name }}' + LOCATION: '{{ edx_django_service_with_rendered_config_memcache }}' + +edx_django_service_with_rendered_config_default_db_host: 'localhost' +edx_django_service_with_rendered_config_default_db_name: '{{ edx_django_service_with_rendered_config_service_name }}' +edx_django_service_with_rendered_config_default_db_atomic_requests: false +edx_django_service_with_rendered_config_default_db_conn_max_age: 60 +edx_django_service_with_rendered_config_db_user: 'REPLACE-ME' +edx_django_service_with_rendered_config_db_password: 'password' 
+edx_django_service_with_rendered_config_db_options: + connect_timeout: 10 + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" + +edx_django_service_with_rendered_config_databases: + default: + ENGINE: 'django.db.backends.mysql' + NAME: '{{ edx_django_service_with_rendered_config_default_db_name }}' + USER: '{{ edx_django_service_with_rendered_config_db_user }}' + PASSWORD: '{{ edx_django_service_with_rendered_config_db_password }}' + HOST: '{{ edx_django_service_with_rendered_config_default_db_host }}' + PORT: '3306' + ATOMIC_REQUESTS: '{{ edx_django_service_with_rendered_config_default_db_atomic_requests }}' + CONN_MAX_AGE: '{{ edx_django_service_with_rendered_config_default_db_conn_max_age }}' + OPTIONS: '{{ edx_django_service_with_rendered_config_db_options }}' + +edx_django_service_with_rendered_config_social_auth_edx_oauth2_key: '{{ edx_django_service_with_rendered_config_service_name }}-sso-key' +edx_django_service_with_rendered_config_social_auth_edx_oauth2_secret: '{{ edx_django_service_with_rendered_config_service_name }}-sso-secret' +edx_django_service_with_rendered_config_backend_service_edx_oauth2_key: '{{ edx_django_service_with_rendered_config_service_name }}-backend-service-key' +edx_django_service_with_rendered_config_backend_service_edx_oauth2_secret: '{{ edx_django_service_with_rendered_config_service_name }}-backend-service-secret' +edx_django_service_with_rendered_config_social_auth_redirect_is_https: false + +edx_django_service_with_rendered_config_oauth2_url_root: '{{ COMMON_LMS_BASE_URL }}' +edx_django_service_with_rendered_config_oauth2_issuer: '{{ COMMON_LMS_BASE_URL }}' +edx_django_service_with_rendered_config_oauth2_logout_url: '{{ COMMON_OAUTH_LOGOUT_URL }}' +edx_django_service_with_rendered_config_oauth2_provider_url: '{{ COMMON_OAUTH_PUBLIC_URL_ROOT }}' + +edx_django_service_with_rendered_config_jwt_audience: '{{ COMMON_JWT_AUDIENCE }}' +edx_django_service_with_rendered_config_jwt_issuer: '{{ COMMON_JWT_ISSUER }}' 
+edx_django_service_with_rendered_config_jwt_secret_key: '{{ COMMON_JWT_SECRET_KEY }}' + +edx_django_service_with_rendered_config_session_expire_at_browser_close: false + +edx_django_service_with_rendered_config_jwt_auth: + JWT_ISSUERS: + - AUDIENCE: '{{ edx_django_service_with_rendered_config_jwt_audience }}' + ISSUER: '{{ edx_django_service_with_rendered_config_jwt_issuer }}' + SECRET_KEY: '{{ edx_django_service_with_rendered_config_jwt_secret_key }}' + JWT_PUBLIC_SIGNING_JWK_SET: '{{ COMMON_JWT_PUBLIC_SIGNING_JWK_SET|string }}' + JWT_AUTH_COOKIE_HEADER_PAYLOAD: '{{ COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}' + JWT_AUTH_COOKIE_SIGNATURE: '{{ COMMON_JWT_AUTH_COOKIE_SIGNATURE }}' + JWT_AUTH_REFRESH_COOKIE: '{{ COMMON_JWT_AUTH_REFRESH_COOKIE }}' + +edx_django_service_with_rendered_config_extra_apps: [] + +edx_django_service_with_rendered_config_api_root: !!null + +edx_django_service_with_rendered_config_service_config_default: + LANGUAGE_CODE: '{{ edx_django_service_with_rendered_config_language_code }}' + SECRET_KEY: '{{ edx_django_service_with_rendered_config_secret_key }}' + TIME_ZONE: 'UTC' + + STATIC_ROOT: '{{ edx_django_service_with_rendered_config_static_root }}' + MEDIA_STORAGE_BACKEND: '{{ edx_django_service_with_rendered_config_media_storage_backend }}' + STATICFILES_STORAGE: '{{ edx_django_service_with_rendered_config_staticfiles_storage }}' + + CACHES: '{{ edx_django_service_with_rendered_config_caches }}' + DATABASES: '{{ edx_django_service_with_rendered_config_databases }}' + + SOCIAL_AUTH_EDX_OAUTH2_KEY: '{{ edx_django_service_with_rendered_config_social_auth_edx_oauth2_key }}' + SOCIAL_AUTH_EDX_OAUTH2_SECRET: '{{ edx_django_service_with_rendered_config_social_auth_edx_oauth2_secret }}' + SOCIAL_AUTH_EDX_OAUTH2_ISSUER: '{{ edx_django_service_with_rendered_config_oauth2_issuer }}' + SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: '{{ edx_django_service_with_rendered_config_oauth2_url_root }}' + SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: '{{ 
edx_django_service_with_rendered_config_oauth2_logout_url }}'
+  SOCIAL_AUTH_REDIRECT_IS_HTTPS: '{{ edx_django_service_with_rendered_config_social_auth_redirect_is_https }}'
+
+  BACKEND_SERVICE_EDX_OAUTH2_KEY: '{{ edx_django_service_with_rendered_config_backend_service_edx_oauth2_key }}'
+  BACKEND_SERVICE_EDX_OAUTH2_SECRET: '{{ edx_django_service_with_rendered_config_backend_service_edx_oauth2_secret }}'
+  BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: '{{ edx_django_service_with_rendered_config_oauth2_provider_url }}'
+
+  JWT_AUTH: '{{ edx_django_service_with_rendered_config_jwt_auth }}'
+
+  EXTRA_APPS: '{{ edx_django_service_with_rendered_config_extra_apps }}'
+
+  EDX_DRF_EXTENSIONS:
+    OAUTH2_USER_INFO_URL: '{{ edx_django_service_with_rendered_config_oauth2_url_root }}/user_info'
+
+  SESSION_EXPIRE_AT_BROWSER_CLOSE: '{{ edx_django_service_with_rendered_config_session_expire_at_browser_close }}'
+
+  API_ROOT: '{{ edx_django_service_with_rendered_config_api_root }}'
+
+# NOTE: This should be overridden by inheriting service-specific role.
+edx_django_service_with_rendered_config_service_config_overrides: {}
+edx_django_service_with_rendered_config_service_config: '{{ edx_django_service_with_rendered_config_service_config_default|combine(edx_django_service_with_rendered_config_service_config_overrides) }}'
+
+edx_django_service_with_rendered_config_automated_users:
+  automated_user:
+    sudo_commands:
+      - command: '{{ edx_django_service_with_rendered_config_venv_bin_dir }}/python {{ edx_django_service_with_rendered_config_code_dir }}/manage.py showmigrations'
+        sudo_user: '{{ edx_django_service_with_rendered_config_user }}'
+    authorized_keys:
+      - 'SSH authorized key'
+
+edx_django_service_with_rendered_config_sandbox_build: false
+
+# This array contains commands that should be run after migration.
+#
+# The commands will be executed from the code directory with the application's virtualenv activated. The migration
+# environment (e.g. 
migration DB username/password) will NOT be used, so commands should not rely on these values being +# set. In other words, don't try to sneak in another run of the migrate management command. +# +# Example: +# edx_django_service_with_rendered_config_post_migrate_management_commands: +# - command: './manage.py conditional_command' +# when: '{{ foo }}' +# - command: './manage.py always_command' +# when: True +# +# In this example, the "conditional_command" will only be run when the variable `foo` is set to `True`. The +# "always_command" will always be run because its conditional is set to `True`. To minimize surprises, the `when` +# key *MUST* be supplied for all commands. +# +edx_django_service_with_rendered_config_post_migrate_commands: [] + +EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ENABLE_ADMIN_URLS_RESTRICTION: false + +EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ADMIN_URLS: [] diff --git a/playbooks/roles/edx_django_service_with_rendered_config/meta/main.yml b/playbooks/roles/edx_django_service_with_rendered_config/meta/main.yml new file mode 100644 index 00000000000..b9e62940116 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/meta/main.yml @@ -0,0 +1,15 @@ +--- +dependencies: + - common + - supervisor + - role: automated + AUTOMATED_USERS: "{{ edx_django_service_with_rendered_config_automated_users }}" + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ edx_django_service_with_rendered_config_service_name }}" + edx_service_with_rendered_config_service_config: "{{ edx_django_service_with_rendered_config_service_config }}" + edx_service_with_rendered_config_repos: "{{ edx_django_service_with_rendered_config_repos }}" + edx_service_with_rendered_config_user: "{{ edx_django_service_with_rendered_config_user }}" + edx_service_with_rendered_config_home: "{{ edx_django_service_with_rendered_config_home }}" + edx_service_with_rendered_config_packages: + debian: "{{ 
edx_django_service_with_rendered_config_debian_pkgs }}" + redhat: [] diff --git a/playbooks/roles/edx_django_service_with_rendered_config/tasks/main.yml b/playbooks/roles/edx_django_service_with_rendered_config/tasks/main.yml new file mode 100644 index 00000000000..c9c8cfd0fac --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/tasks/main.yml @@ -0,0 +1,296 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role edx_django_service_with_rendered_config +# +# Overview: +# For devstack set edx_django_service_with_rendered_config_is_devstack to true. +# +# Dependencies: +# +# +# Example play: +# +# + +- name: add gunicorn configuration file + template: + src: "edx/app/app/app_gunicorn.py.j2" + dest: "{{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}_gunicorn.py" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + tags: + - install + - install:configuration + +- name: install python3 + apt: + name: "{{ item }}" + when: edx_django_service_with_rendered_config_use_python3 + with_items: + - python3-pip + - python3-dev + tags: + - install + - install:system-requirements + +- name: build virtualenv with python3 + command: "virtualenv --python=python3 {{ edx_django_service_with_rendered_config_venv_dir }}" + args: + creates: "{{ edx_django_service_with_rendered_config_venv_dir }}/bin/pip" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + when: edx_django_service_with_rendered_config_use_python3 + tags: + - install + - install:system-requirements + +- name: build virtualenv with python2.7 + command: "virtualenv --python=python2.7 {{ edx_django_service_with_rendered_config_venv_dir }}" 
+ args: + creates: "{{ edx_django_service_with_rendered_config_venv_dir }}/bin/pip" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + when: not edx_django_service_with_rendered_config_use_python3 + tags: + - install + - install:system-requirements + +- name: Pin pip to a specific version. + command: "{{ edx_django_service_with_rendered_config_venv_dir }}/bin/pip install pip=={{ common_pip_version }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + tags: + - install + - install:system-requirements + +# NOTE (CCB): Ideally we should use the pip Ansible command, +# but that doesn't seem to work with the Python 3.x virtualenv. +- name: install nodenv + command: pip install nodeenv + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_environment }}" + tags: + - install + - install:system-requirements + +- name: create nodeenv + command: "nodeenv {{ edx_django_service_with_rendered_config_nodeenv_dir }} --node={{ edx_django_service_with_rendered_config_node_version }} --prebuilt" + args: + creates: "{{ edx_django_service_with_rendered_config_nodeenv_dir }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_environment }}" + tags: + - install + - install:system-requirements + +- name: install production requirements + command: make production-requirements + args: + chdir: "{{ edx_django_service_with_rendered_config_code_dir }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_environment }}" + tags: + - install + - install:app-requirements + +- name: install development requirements + command: make requirements + args: + chdir: "{{ edx_django_service_with_rendered_config_code_dir }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ 
edx_django_service_with_rendered_config_environment }}" + when: edx_django_service_with_rendered_config_is_devstack is defined and edx_django_service_with_rendered_config_is_devstack + tags: + - install + - install:app-requirements + - devstack + - devstack:install + +- name: migrate database + command: make migrate + args: + chdir: "{{ edx_django_service_with_rendered_config_code_dir }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_migration_environment }}" + when: migrate_db is defined and migrate_db|lower == "yes" + run_once: yes + tags: + - migrate + - migrate:db + +- name: run post-migrate commands + command: "{{ item.command }}" + args: + chdir: "{{ edx_django_service_with_rendered_config_code_dir }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_environment }}" + with_items: '{{ edx_django_service_with_rendered_config_post_migrate_commands }}' + when: migrate_db is defined and migrate_db|lower == "yes" and item.when|bool + run_once: yes + tags: + - migrate + - migrate:db + - migrate:post + +- name: ensure log files exist for tailing + file: + path: "{{ item }}" + state: touch + owner: "{{ common_web_user }}" + group: "{{ common_web_user }}" + with_items: '{{ edx_django_service_with_rendered_config_name_devstack_logs }}' + tags: + - install + - install:configuration + +- name: write out the supervisor wrapper + template: + src: "edx/app/app/app.sh.j2" + dest: "{{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}.sh" + mode: 0650 + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + tags: + - install + - install:configuration + +- name: write supervisord config + template: + src: "edx/app/supervisor/conf.d.available/app.conf.j2" + dest: "{{ supervisor_available_dir }}/{{ edx_django_service_with_rendered_config_service_name 
}}.conf" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0644 + tags: + - install + - install:configuration + +- name: write devstack script + template: + src: "edx/app/app/devstack.sh.j2" + dest: "{{ edx_django_service_with_rendered_config_home }}/devstack.sh" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0744 + when: edx_django_service_with_rendered_config_is_devstack is defined and edx_django_service_with_rendered_config_is_devstack + tags: + - devstack + - devstack:install + +- name: setup the app env file + template: + src: "edx/app/app/app_env.j2" + dest: "{{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}_env" + owner: "{{ edx_django_service_with_rendered_config_user }}" + group: "{{ edx_django_service_with_rendered_config_user }}" + mode: 0644 + tags: + - install + - install:configuration + +- name: enable supervisor script + file: + src: "{{ supervisor_available_dir }}/{{ edx_django_service_with_rendered_config_service_name }}.conf" + dest: "{{ supervisor_cfg_dir }}/{{ edx_django_service_with_rendered_config_service_name }}.conf" + state: link + force: yes + when: not disable_edx_services + tags: + - install + - install:configuration + +- name: update supervisor configuration + command: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" + when: not disable_edx_services + tags: + - manage + - manage:start + +- name: create symlinks from the venv bin dir + file: + src: "{{ edx_django_service_with_rendered_config_venv_dir }}/bin/{{ item }}" + dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ edx_django_service_with_rendered_config_service_name }}" + state: link + with_items: + - python + - pip + - django-admin.py + tags: + - install + - install:app-requirements + +- name: create symlinks from the repo dir + file: + src: "{{ edx_django_service_with_rendered_config_code_dir }}/{{ item }}" + dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] 
}}.{{ edx_django_service_with_rendered_config_service_name }}" + state: link + with_items: + - manage.py + tags: + - install + - install:app-requirements + +- name: compile static assets + command: make static + args: + chdir: "{{ edx_django_service_with_rendered_config_code_dir }}" + become_user: "{{ edx_django_service_with_rendered_config_user }}" + environment: "{{ edx_django_service_with_rendered_config_environment }}" + when: edx_django_service_with_rendered_config_has_static_assets + tags: + - assets + - assets:gather + +- name: restart the application + supervisorctl: + state: restarted + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + name: "{{ edx_django_service_with_rendered_config_service_name }}" + when: not disable_edx_services + become_user: "{{ supervisor_service_user }}" + tags: + - manage + - manage:start + +- name: Copying nginx configs for the service + template: + src: "edx/app/nginx/sites-available/app.j2" + dest: "{{ nginx_sites_available_dir }}/{{ edx_django_service_with_rendered_config_service_name }}" + owner: root + group: "{{ common_web_user }}" + mode: 0640 + when: nginx_app_dir is defined + notify: reload nginx + tags: + - install + - install:vhosts + +- name: Creating nginx config links for the service + file: + src: "{{ nginx_sites_available_dir }}/{{ edx_django_service_with_rendered_config_service_name }}" + dest: "{{ nginx_sites_enabled_dir }}/{{ edx_django_service_with_rendered_config_service_name }}" + state: link + owner: root + group: root + when: nginx_app_dir is defined + notify: reload nginx + tags: + - install + - install:vhosts + +- name: Include JWT signature setting in the app config file + include_role: + name: jwt_signature + when: edx_django_service_with_rendered_config_sandbox_build + vars: + app_name: '{{ edx_django_service_with_rendered_config_service_name }}' + app_config_file: "{{ COMMON_CFG_DIR }}/{{ edx_django_service_with_rendered_config_service_name }}.yml" + app_config_owner: 
root + app_config_group: root + app_config_mode: 0644 diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app.sh.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app.sh.j2 new file mode 100644 index 00000000000..0a7b94b2e64 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app.sh.j2 @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +{% set edx_django_service_with_rendered_config_venv_bin = edx_django_service_with_rendered_config_venv_dir + "/bin" %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = edx_django_service_with_rendered_config_venv_bin + '/newrelic-admin run-program ' + edx_django_service_with_rendered_config_venv_bin + '/gunicorn' %} +{% else %} +{% set executable = edx_django_service_with_rendered_config_venv_bin + '/gunicorn' %} +{% endif %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED="{{ edx_django_service_with_rendered_config_enable_newrelic_distributed_tracing }}" +export NEW_RELIC_APP_NAME="{{ edx_django_service_with_rendered_config_newrelic_appname }}" +if command -v ec2metadata >/dev/null 2>&1; then + INSTANCEID=$(ec2metadata --instance-id); + HOSTNAME=$(hostname) + export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID" +fi +export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" +{% endif -%} + +export EDX_REST_API_CLIENT_NAME="{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ edx_django_service_with_rendered_config_service_name }}" + +source {{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}_env +exec {{ executable }} -c {{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}_gunicorn.py {{ edx_django_service_with_rendered_config_gunicorn_extra }} {{ edx_django_service_with_rendered_config_wsgi_name }}.wsgi:application diff --git 
a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app_env.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app_env.j2 new file mode 100644 index 00000000000..b6c48b27077 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app_env.j2 @@ -0,0 +1,7 @@ +# {{ ansible_managed }} + +{% for name,value in edx_django_service_with_rendered_config_environment.items() -%} +{%- if value -%} +export {{ name }}="{{ value }}" +{% endif %} +{%- endfor %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app_gunicorn.py.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app_gunicorn.py.j2 new file mode 100644 index 00000000000..598409cb561 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/app_gunicorn.py.j2 @@ -0,0 +1,18 @@ +"""
+gunicorn configuration file: http://docs.gunicorn.org/en/develop/configure.html
+{{ ansible_managed }}
+"""
+
+timeout = {{ edx_django_service_with_rendered_config_gunicorn_timeout }}
+bind = "{{ edx_django_service_with_rendered_config_gunicorn_host }}:{{ edx_django_service_with_rendered_config_gunicorn_port }}"
+pythonpath = "{{ edx_django_service_with_rendered_config_code_dir }}"
+workers = {{ edx_django_service_with_rendered_config_gunicorn_workers }}
+worker_class = "{{ edx_django_service_with_rendered_config_gunicorn_worker_class }}"
+
+{% if edx_django_service_with_rendered_config_gunicorn_max_requests -%}
+max_requests = {{ edx_django_service_with_rendered_config_gunicorn_max_requests }}
+{% endif %}
+
+{{ edx_django_service_with_rendered_config_gunicorn_extra_conf }}
+
+{{ common_pre_request }} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/devstack.sh.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/devstack.sh.j2 new file 
mode 100644 index 00000000000..f41d4a7a874 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/app/devstack.sh.j2 @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +source {{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}_env +COMMAND=$1 + +case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; + open) + . {{ edx_django_service_with_rendered_config_nodeenv_bin }}/activate + . {{ edx_django_service_with_rendered_config_venv_bin_dir }}/activate + cd {{ edx_django_service_with_rendered_config_code_dir }} + + /bin/bash + ;; + exec) + shift + + . {{ edx_django_service_with_rendered_config_nodeenv_bin }}/activate + . {{ edx_django_service_with_rendered_config_venv_bin_dir }}/activate + cd {{ edx_django_service_with_rendered_config_code_dir }} + + "$@" + ;; + *) + "$@" + ;; +esac diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/app.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/app.j2 new file mode 100644 index 00000000000..9e8c72db16c --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/app.j2 @@ -0,0 +1,34 @@ +# +# {{ ansible_managed }} +# + +{% include "concerns/upstream.j2"%} +{% include "concerns/cors-build-map.j2" %} + +server { + server_name {{ edx_django_service_with_rendered_config_hostname }}; + listen {{ edx_django_service_with_rendered_config_nginx_port }}; + +{% if NGINX_ENABLE_SSL %} + {% include "concerns/handle-ip-disclosure.j2" %} + rewrite ^ https://$host$request_uri? 
permanent; +{% else %} + {% if NGINX_REDIRECT_TO_HTTPS %} + {% include "concerns/handle-tls-terminated-elsewhere-ip-disclosure.j2" %} + {% include "concerns/handle-tls-terminated-elsewhere-redirect.j2" %} + {% endif %} + {% include "concerns/app-common.j2" %} +{% endif %} +} + +{% if NGINX_ENABLE_SSL %} +server { + server_name {{ edx_django_service_with_rendered_config_hostname }}; + listen {{ edx_django_service_with_rendered_config_ssl_nginx_port }} ssl; + ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; + ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; + + {% include "concerns/app-common.j2" %} +} +{% endif %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/admin_urls_access_from_restricted_cidrs.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/admin_urls_access_from_restricted_cidrs.j2 new file mode 100644 index 00000000000..a631375b7bf --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/admin_urls_access_from_restricted_cidrs.j2 @@ -0,0 +1,11 @@ +{% if NGINX_ADMIN_ACCESS_CIDRS and EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ENABLE_ADMIN_URLS_RESTRICTION %} + location ~ ^/({{ EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ADMIN_URLS|join("|") }}) { + real_ip_header X-Forwarded-For; + set_real_ip_from {{ NGINX_TRUSTED_IP_CIDRS }}; + {% for cidr in NGINX_ADMIN_ACCESS_CIDRS %} + allow {{ cidr }}; + {% endfor %} + deny all; + try_files $uri @proxy_to_app; + } +{% endif %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/app-common.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/app-common.j2 new file mode 100644 index 
00000000000..75a2497dccf --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/app-common.j2 @@ -0,0 +1,10 @@ + +{% include "concerns/s3_maintenance.j2" %} +{% include "concerns/static-assets.j2" %} +{% include "concerns/proxy-to-app.j2" %} +{% if edx_django_service_with_rendered_config_max_webserver_upload %} + client_max_body_size {{ edx_django_service_with_rendered_config_max_webserver_upload }}M; +{% endif %} +{% if edx_django_service_with_rendered_config_nginx_read_timeout %} + proxy_read_timeout {{ edx_django_service_with_rendered_config_nginx_read_timeout }}; +{% endif %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 new file mode 100644 index 00000000000..82c8cf21a7d --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/basic-auth.j2 @@ -0,0 +1,17 @@ +{% if edx_django_service_with_rendered_config_enable_basic_auth|bool %} + satisfy any; + + allow 127.0.0.1; + + {% for cidr in COMMON_BASIC_AUTH_EXCEPTIONS %} + allow {{ cidr }}; + {% endfor %} + + deny all; + + auth_basic "Restricted"; + auth_basic_user_file {{ nginx_htpasswd_file }}; + + index index.html; + proxy_set_header X-Forwarded-Proto https; +{% endif %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 new file mode 100644 index 00000000000..fa96d4d179f --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/cors-add-header.j2 @@ -0,0 +1,16 @@ + if ($request_method = 'OPTIONS') { + 
add_header 'Access-Control-Allow-Origin' $cors_origin; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; + add_header 'Access-Control-Allow-Headers' 'Authorization, USE-JWT-COOKIE'; + {% if edx_django_service_with_rendered_config_allow_cors_credentials %} + add_header 'Access-Control-Allow-Credentials' true; + {% endif %} + add_header 'Access-Control-Max-Age' 86400; + add_header 'Content-Type' 'text/plain; charset=utf-8'; + add_header 'Content-Length' 0; + return 204; + } + + add_header 'Access-Control-Allow-Origin' $cors_origin always; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always; + add_header 'Access-Control-Allow-Credentials' true always; diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/cors-build-map.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/cors-build-map.j2 new file mode 100644 index 00000000000..a8df0c26dfd --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/cors-build-map.j2 @@ -0,0 +1,15 @@ + + +# The Origin request header indicates where a fetch originates from. It doesn't include any path information, +# but only the server name (e.g. https://www.example.com). +# See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin for details. + +# Here we set the value that is included in the Access-Control-Allow-Origin response header. If the origin is one +# of our known hosts--served via HTTP or HTTPS--we allow for CORS. Otherwise, we set the "null" value, disallowing CORS. 
+ +map $http_origin $cors_origin { +default "null"; +{% for host in edx_django_service_with_rendered_config_cors_whitelist %} + "~*^https?:\/\/{{ host|replace('.', '\.') }}$" $http_origin; +{% endfor %} +} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 new file mode 100644 index 00000000000..a7932a8d9d2 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/handle-ip-disclosure.j2 @@ -0,0 +1,7 @@ +# If you are changing this be warned that it lives in multiple places +# there is a TLS redirect to same box, and a TLS redirect to externally terminated TLS +# version of this in nginx and in edx_django_service_with_rendered_config role. + +if ($host ~ "\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}") { + return 403; +} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/handle-tls-terminated-elsewhere-ip-disclosure.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/handle-tls-terminated-elsewhere-ip-disclosure.j2 new file mode 100644 index 00000000000..0c369a3e784 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/handle-tls-terminated-elsewhere-ip-disclosure.j2 @@ -0,0 +1,16 @@ + +# If you are changing this be warned that it lives in multiple places +# there is a TLS redirect to same box, and a TLS redirect to externally terminated TLS +# version of this in nginx and in edx_django_service_with_rendered_config role. 
+ +if ($host ~ "\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}") { + set $test_ip_disclosure A; +} + +if ($http_x_forwarded_for != "") { + set $test_ip_disclosure "${test_ip_disclosure}B"; +} + +if ($test_ip_disclosure = AB) { + return 403; +} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/handle-tls-terminated-elsewhere-redirect.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/handle-tls-terminated-elsewhere-redirect.j2 new file mode 100644 index 00000000000..b83caa33715 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/handle-tls-terminated-elsewhere-redirect.j2 @@ -0,0 +1,18 @@ +{% if NGINX_HTTPS_REDIRECT_STRATEGY == "scheme" %} + if ($scheme != "https") + { + set $do_redirect_to_https "true"; + } + +{% elif NGINX_HTTPS_REDIRECT_STRATEGY == "forward_for_proto" %} + if ($http_x_forwarded_proto = "http") + { + set $do_redirect_to_https "true"; + } +{% endif %} + + if ($do_redirect_to_https = "true") + { + return 301 https://$host$request_uri; + } + diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/proxy-to-app.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/proxy-to-app.j2 new file mode 100644 index 00000000000..b49e5f0a2cd --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/proxy-to-app.j2 @@ -0,0 +1,38 @@ + +location / { + {% include 'concerns/basic-auth.j2' %} + try_files $uri @proxy_to_app; +} + +location ~ ^/({{ edx_django_service_with_rendered_config_basic_auth_exempted_paths | join('|') }})/ { + try_files $uri @proxy_to_app; +} + +{% include 'concerns/robots.j2' %} + +{% include "concerns/admin_urls_access_from_restricted_cidrs.j2" %} + +location @proxy_to_app { + 
+{% if edx_django_service_with_rendered_config_allow_cors_headers %} + {% include 'concerns/cors-add-header.j2' %} +{% endif %} + +{% if NGINX_SET_X_FORWARDED_HEADERS %} + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $remote_addr; +{% else %} + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header X-Forwarded-Port $http_x_forwarded_port; + proxy_set_header X-Forwarded-For $http_x_forwarded_for; +{% endif %} + + # newrelic-specific header records the time when nginx handles a request. + proxy_set_header X-Queue-Start "t=${msec}"; + + proxy_set_header Host $http_host; + + proxy_redirect off; + proxy_pass http://{{ edx_django_service_with_rendered_config_service_name }}_app_server; +} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/robots.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/robots.j2 new file mode 100644 index 00000000000..49d36d10d90 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/robots.j2 @@ -0,0 +1,6 @@ +{% if NGINX_ROBOT_RULES|length > 0 %} +location /robots.txt { + root {{ nginx_app_dir }}; + try_files $uri /robots.txt =404; +} +{% endif %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/s3_maintenance.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/s3_maintenance.j2 new file mode 100644 index 00000000000..97928f12236 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/s3_maintenance.j2 @@ -0,0 +1,25 @@ +{% if EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ENABLE_S3_MAINTENANCE %} + # Do not include a 502 error in NGINX_ERROR_PAGES when + # 
EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ENABLE_S3_MAINTENANCE is enabled. + + error_page 502 @maintenance; + + # This section of the file was copied from playbooks/roles/nginx/templates/edx/app/nginx/sites-available/ + # modifications should be made to both files if necessary. + + {% if EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_ENABLE_S3_MAINTENANCE %} + location @maintenance { + rewrite ^(.*) {{ EDX_DJANGO_SERVICE_WITH_RENDERED_CONFIG_S3_MAINTENANCE_FILE }} break; + proxy_http_version 1.1; + proxy_set_header Host s3.amazonaws.com; + proxy_set_header Authorization ''; + proxy_hide_header x-amz-id-2; + proxy_hide_header x-amz-request-id; + proxy_hide_header Set-Cookie; + proxy_ignore_headers "Set-Cookie"; + proxy_buffering off; + proxy_intercept_errors on; + proxy_pass https://s3.amazonaws.com; + } + {% endif %} +{% endif %} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/static-assets.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/static-assets.j2 new file mode 100644 index 00000000000..8e1ef780460 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/static-assets.j2 @@ -0,0 +1,15 @@ +location ~ ^/static/(?P.*) { + root {{ COMMON_DATA_DIR }}/{{ edx_django_service_with_rendered_config_service_name }}; + {% include "concerns/cors-add-header.j2" %} + + # Inform downstream caches to take certain headers into account when reading/writing to cache. 
+ add_header 'Vary' 'Accept-Encoding,Origin'; + + try_files /staticfiles/$file =404; +} + +location ~ ^/media/(?P.*) { + root {{ COMMON_DATA_DIR }}/{{ edx_django_service_with_rendered_config_service_name }}; + try_files /media/$file =404; +} + diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/upstream.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/upstream.j2 new file mode 100644 index 00000000000..09b069e12c5 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/nginx/sites-available/concerns/upstream.j2 @@ -0,0 +1,5 @@ +upstream {{ edx_django_service_with_rendered_config_service_name }}_app_server { +{% for host in nginx_edx_django_service_with_rendered_config_gunicorn_hosts %} + server {{ host }}:{{ edx_django_service_with_rendered_config_gunicorn_port }} fail_timeout=0; +{% endfor %} +} diff --git a/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/supervisor/conf.d.available/app.conf.j2 b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/supervisor/conf.d.available/app.conf.j2 new file mode 100644 index 00000000000..57fe8557f99 --- /dev/null +++ b/playbooks/roles/edx_django_service_with_rendered_config/templates/edx/app/supervisor/conf.d.available/app.conf.j2 @@ -0,0 +1,19 @@ +# +# {{ ansible_managed }} +# + +{% if edx_django_service_with_rendered_config_is_devstack %} +[program:nginx] +command=nginx -g 'daemon off;' +killasgroup=true +stopasgroup=true +{% endif %} + +[program:{{ edx_django_service_with_rendered_config_service_name }}] +command={{ edx_django_service_with_rendered_config_home }}/{{ edx_django_service_with_rendered_config_service_name }}.sh +user={{ common_web_user }} +directory={{ edx_django_service_with_rendered_config_code_dir }} +stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log +stderr_logfile={{ 
supervisor_log_dir }}/%(program_name)s-stderr.log +killasgroup=true +stopasgroup=true diff --git a/playbooks/roles/container/defaults/main.yml b/playbooks/roles/edx_maintenance/defaults/main.yml similarity index 63% rename from playbooks/roles/container/defaults/main.yml rename to playbooks/roles/edx_maintenance/defaults/main.yml index 85aff411401..9a8e81a1a4e 100644 --- a/playbooks/roles/container/defaults/main.yml +++ b/playbooks/roles/edx_maintenance/defaults/main.yml @@ -8,18 +8,14 @@ # license: https://github.com/edx/configuration/blob/master/LICENSE.TXT # ## -# Defaults for role container -# - -# -# vars are namespaced with the module name. +# Defaults for role edx_maintenance # -container_role_name: container # -# OS packages +# vars are namespace with the module name. # - -container_debian_pkgs: [] - -container_redhat_pkgs: [] +ENABLE_MAINTENANCE: False +EDX_MAINTENANCE_SUPERVISOR_APPS: + - 'lms' + - 'cms' + - 'edxapp_worker:' diff --git a/playbooks/roles/docker/meta/main.yml b/playbooks/roles/edx_maintenance/meta/main.yml similarity index 84% rename from playbooks/roles/docker/meta/main.yml rename to playbooks/roles/edx_maintenance/meta/main.yml index 441e2662bdb..761b9524e56 100644 --- a/playbooks/roles/docker/meta/main.yml +++ b/playbooks/roles/edx_maintenance/meta/main.yml @@ -8,16 +8,16 @@ # license: https://github.com/edx/configuration/blob/master/LICENSE.TXT # ## -# Role includes for role docker -# +# Role includes for role edx_maintenance +# # Example: # # dependencies: # - { -# role: my_role +# role: my_role # my_role_var0: "foo" # my_role_var1: "bar" # } dependencies: - - container + - common_vars diff --git a/playbooks/roles/edx_maintenance/tasks/main.yml b/playbooks/roles/edx_maintenance/tasks/main.yml new file mode 100644 index 00000000000..d1ee3b798cb --- /dev/null +++ b/playbooks/roles/edx_maintenance/tasks/main.yml @@ -0,0 +1,71 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: 
https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role edx_maintenance +# + +- name: Find supervisor apps + supervisorctl: + name: "{{ item }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: present + become_user: "{{ supervisor_service_user }}" + ignore_errors: yes + register: supervisor_apps + with_items: "{{ EDX_MAINTENANCE_SUPERVISOR_APPS }}" + tags: + - manage + +- name: Enable fake heartbeat + copy: + content: "" + dest: "{{ nginx_server_static_dir }}/maintenance_heartbeat.txt" + owner: root + group: "{{ common_web_user }}" + mode: "0640" + when: ENABLE_MAINTENANCE + tags: + - manage + +- name: Stop edxapp + supervisorctl: + # Use item.item because item.item strips the : off of edxapp_worker: + name: "{{ item.item }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: stopped + become_user: "{{ supervisor_service_user }}" + when: ENABLE_MAINTENANCE and not item is failed + with_items: "{{ supervisor_apps.results }}" + tags: + - manage + +- name: Start edxapp + supervisorctl: + # Use item.item because item.item strips the : off of edxapp_worker: + name: "{{ item.item }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: started + become_user: "{{ supervisor_service_user }}" + when: not ENABLE_MAINTENANCE and not item is failed + with_items: "{{ supervisor_apps.results }}" + tags: + - manage + +- name: Disable fake heartbeat + file: + dest: "{{ nginx_server_static_dir }}/maintenance_heartbeat.txt" + state: absent + when: not ENABLE_MAINTENANCE + tags: + - manage diff --git a/playbooks/roles/edx_notes_api/defaults/main.yml b/playbooks/roles/edx_notes_api/defaults/main.yml index a34513425d0..fe35163ba00 100644 --- a/playbooks/roles/edx_notes_api/defaults/main.yml +++ 
b/playbooks/roles/edx_notes_api/defaults/main.yml @@ -25,6 +25,9 @@ EDX_NOTES_API_MYSQL_OPTIONS: connect_timeout: 10 EDX_NOTES_API_ELASTICSEARCH_URL: "localhost:9200" EDX_NOTES_API_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-notes" +EDX_NOTES_API_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false +EDX_NOTES_API_HOSTNAME: '~^((stage|prod)-)?notes.*' + # Change these values!! EDX_NOTES_API_SECRET_KEY: "CHANGEME" EDX_NOTES_API_CLIENT_ID: "CHANGEME" @@ -33,6 +36,8 @@ EDX_NOTES_API_GIT_SSH_KEY: !!null EDX_NOTES_API_VERSION: master EDX_NOTES_API_DJANGO_SETTINGS_MODULE: 'notesserver.settings.yaml_config' +EDX_NOTES_USERNAME_REPLACEMENT_WORKER: "OVERRIDE THIS WITH A VALID USERNAME" + EDX_NOTES_API_DATABASES: # rw user default: @@ -56,6 +61,12 @@ EDX_NOTES_API_REPOS: DESTINATION: "{{ edx_notes_api_code_dir }}" SSH_KEY: "{{ EDX_NOTES_API_GIT_SSH_KEY }}" +# Remote config +EDX_NOTES_API_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +EDX_NOTES_API_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +EDX_NOTES_API_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + # # This data structure will be written out to yaml configuration file # in /edx/etc @@ -80,6 +91,16 @@ edx_notes_api_service_config: URL: "{{ EDX_NOTES_API_ELASTICSEARCH_URL }}" INDEX_NAME: '{{ EDX_NOTES_API_DATASTORE_NAME }}' DISABLE_TOKEN_CHECK: "{{ EDX_NOTES_API_DISABLE_TOKEN_CHECK }}" + JWT_AUTH: + JWT_ISSUERS: + - AUDIENCE: '{{ COMMON_JWT_AUDIENCE }}' + ISSUER: '{{ COMMON_JWT_ISSUER }}' + SECRET_KEY: '{{ COMMON_JWT_SECRET_KEY }}' + JWT_PUBLIC_SIGNING_JWK_SET: '{{ COMMON_JWT_PUBLIC_SIGNING_JWK_SET|string }}' + JWT_AUTH_COOKIE_HEADER_PAYLOAD: '{{ COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}' + JWT_AUTH_COOKIE_SIGNATURE: '{{ COMMON_JWT_AUTH_COOKIE_SIGNATURE }}' + JWT_AUTH_REFRESH_COOKIE: '{{ COMMON_JWT_AUTH_REFRESH_COOKIE }}' + USERNAME_REPLACEMENT_WORKER: "{{ EDX_NOTES_USERNAME_REPLACEMENT_WORKER }}" # # vars are namespace with the module name. 
@@ -99,6 +120,7 @@ edx_notes_api_home: "{{ COMMON_APP_DIR }}/{{ edx_notes_api_service_name }}" edx_notes_api_code_dir: "{{ edx_notes_api_home }}/{{ edx_notes_api_service_name }}" edx_notes_api_conf_dir: "{{ edx_notes_api_home }}" edx_notes_api_venv_dir: "{{ edx_notes_api_home }}/venvs/{{ edx_notes_api_service_name }}" +edx_notes_api_venv_bin: "{{ edx_notes_api_venv_dir }}/bin" edx_notes_api_gunicorn_host: "127.0.0.1" edx_notes_api_gunicorn_port: 8120 @@ -118,5 +140,7 @@ edx_notes_api_requirements: edx_notes_api_debian_pkgs: - libmysqlclient-dev - python-mysqldb + - libssl-dev # needed for mysqlclient python library + - python3-dev edx_notes_api_redhat_pkgs: [] diff --git a/playbooks/roles/edx_notes_api/meta/main.yml b/playbooks/roles/edx_notes_api/meta/main.yml index e610e973afe..2a73bba84b7 100644 --- a/playbooks/roles/edx_notes_api/meta/main.yml +++ b/playbooks/roles/edx_notes_api/meta/main.yml @@ -31,4 +31,5 @@ dependencies: edx_service_packages: debian: "{{ edx_notes_api_debian_pkgs }}" redhat: "{{ edx_notes_api_redhat_pkgs }}" - + edx_service_decrypt_config_enabled: "{{ EDX_NOTES_API_DECRYPT_CONFIG_ENABLED }}" + edx_service_copy_config_enabled: "{{ EDX_NOTES_API_COPY_CONFIG_ENABLED }}" diff --git a/playbooks/roles/edx_notes_api/tasks/main.yml b/playbooks/roles/edx_notes_api/tasks/main.yml index c2803bd51fd..b29fac37f33 100644 --- a/playbooks/roles/edx_notes_api/tasks/main.yml +++ b/playbooks/roles/edx_notes_api/tasks/main.yml @@ -9,7 +9,7 @@ # # # Tasks for role edx-notes-api -# +# # Overview: # # Role for installing the edx-notes-api Django application, https://github.com/edx/edx-notes-api. 
@@ -36,7 +36,7 @@ # nginx_sites: # - edx-notes-api # - aws -# - edx-notes-api +# - edx-notes-api # - role: datadog # when: COMMON_ENABLE_DATADOG # - role: splunkforwarder @@ -48,12 +48,25 @@ virtualenv: "{{ edx_notes_api_home }}/venvs/{{ edx_notes_api_service_name }}" state: present extra_args: "--exists-action w" + virtualenv_python: 'python3.5' become_user: "{{ edx_notes_api_user }}" with_items: "{{ edx_notes_api_requirements }}" tags: - install - install:system-requirements +- name: write devstack script + template: + src: "edx/app/edx_notes_api/devstack.sh.j2" + dest: "{{ edx_notes_api_home }}/devstack.sh" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0744 + when: devstack is defined and devstack + tags: + - devstack + - devstack:install + - name: Migrate shell: > DB_MIGRATION_USER='{{ COMMON_MYSQL_MIGRATE_USER }}' diff --git a/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/devstack.sh.j2 b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/devstack.sh.j2 new file mode 100644 index 00000000000..80dac778517 --- /dev/null +++ b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/devstack.sh.j2 @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +source {{ edx_notes_api_home }}/edx_notes_api_env +COMMAND=$1 + +case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; + open) + . {{ edx_notes_api_venv_bin }}/activate + cd {{ edx_notes_api_code_dir }} + + /bin/bash + ;; + exec) + shift + + . 
{{ edx_notes_api_venv_bin }}/activate + cd {{ edx_notes_api_code_dir }} + + "$@" + ;; + *) + "$@" + ;; +esac diff --git a/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api.sh.j2 b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api.sh.j2 index 9bb137262b5..179ba8e8823 100644 --- a/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api.sh.j2 +++ b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api.sh.j2 @@ -10,6 +10,7 @@ {% endif %} {% if COMMON_ENABLE_NEWRELIC_APP %} +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED="{{ EDX_NOTES_API_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}" export NEW_RELIC_APP_NAME="{{ EDX_NOTES_API_NEWRELIC_APPNAME }}" export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" {% endif -%} @@ -20,4 +21,5 @@ export PID=/var/tmp/edx_notes_api.pid export PORT={{ edx_notes_api_gunicorn_port }} export ADDRESS={{ edx_notes_api_gunicorn_host }} -{{ executable }} -c {{ edx_notes_api_home }}/edx_notes_api_gunicorn.py {{ EDX_NOTES_API_GUNICORN_WORKERS_EXTRA }} {{ edx_notes_api_wsgi }} +# We exec so that gunicorn is the child of supervisor and can be managed properly +exec {{ executable }} -c {{ edx_notes_api_home }}/edx_notes_api_gunicorn.py {{ EDX_NOTES_API_GUNICORN_WORKERS_EXTRA }} {{ edx_notes_api_wsgi }} diff --git a/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api_gunicorn.py.j2 b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api_gunicorn.py.j2 index 371fcf2ef61..d5359ad691c 100644 --- a/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api_gunicorn.py.j2 +++ b/playbooks/roles/edx_notes_api/templates/edx/app/edx_notes_api/edx_notes_api_gunicorn.py.j2 @@ -16,6 +16,8 @@ workers = {{ EDX_NOTES_API_WORKERS }} workers = (multiprocessing.cpu_count()-1) * 2 + 2 {% endif %} +{{ common_pre_request }} + {{ common_close_all_caches }} def post_fork(server, worker): diff --git 
a/playbooks/roles/edx_service/defaults/main.yml b/playbooks/roles/edx_service/defaults/main.yml index f6b9d3d20f2..3c3dd55c5e9 100644 --- a/playbooks/roles/edx_service/defaults/main.yml +++ b/playbooks/roles/edx_service/defaults/main.yml @@ -33,3 +33,8 @@ edx_service_config_filter_nones: false edx_service_packages: debian: [] redhat: [] + +edx_service_local_config_file: "{{ UNENCRYPTED_CFG_DIR }}/{{ edx_service_name }}.yml" + +edx_service_decrypt_config_enabled: false +edx_service_copy_config_enabled: false diff --git a/playbooks/roles/edx_service/tasks/main.yml b/playbooks/roles/edx_service/tasks/main.yml index c8f66357bf8..65747aef1de 100644 --- a/playbooks/roles/edx_service/tasks/main.yml +++ b/playbooks/roles/edx_service/tasks/main.yml @@ -98,7 +98,44 @@ src: "config.yml.j2" dest: "{{ COMMON_CFG_DIR }}/{{ edx_service_name }}.yml" mode: "0644" - when: edx_service_config + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + when: not edx_service_decrypt_config_enabled + tags: + - install + - install:configuration + - install:app-configuration + +- name: Decrypt app config file + local_action: command asym_crypto_yaml decrypt-encrypted-yaml --secrets_file_path {{ ENCRYPTED_CFG_DIR }}/{{ edx_service_name }}.yml --private_key_path {{ DECRYPT_CONFIG_PRIVATE_KEY }} --outfile_path {{ UNENCRYPTED_CFG_DIR }}/{{ edx_service_name }}.yml + become: false + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + when: edx_service_decrypt_config_enabled + tags: + - install + - install:configuration + - install:app-configuration + +- name: Replace deploy host to sandbox dns name + replace: + path: "{{ UNENCRYPTED_CFG_DIR }}/{{ edx_service_name }}.yml" + regexp: 'deploy_host' + replace: "{{ COMMON_DEPLOY_HOSTNAME }}" + when: edx_service_decrypt_config_enabled and SANDBOX_CONFIG + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + become: false + delegate_to: localhost + tags: + - install + - install:configuration + - install:app-configuration + +- name: Copy app config file + copy: + src: "{{ 
edx_service_local_config_file }}" + dest: "{{ COMMON_CFG_DIR }}/{{ edx_service_name }}.yml" + mode: 0644 + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + when: edx_service_copy_config_enabled tags: - install - install:configuration @@ -106,11 +143,10 @@ - name: Install a bunch of system packages on which edx_service relies apt: - name: "{{ item }}" + name: "{{ edx_service_packages.debian }}" state: present update_cache: true cache_valid_time: 3600 - with_items: "{{ edx_service_packages.debian }}" when: ansible_distribution in common_debian_variants tags: - install @@ -118,16 +154,16 @@ - name: Install a bunch of system packages on which edx_service relies yum: - name: "{{ item }}" + name: "{{ edx_service_packages.redhat }}" state: present - with_items: "{{ edx_service_packages.redhat }}" when: ansible_distribution in common_redhat_variants tags: - install - install:system-requirements - name: Get instance information - action: ec2_facts + action: ec2_metadata_facts + when: AWS_GATHER_FACTS | default(false) tags: - to-remove diff --git a/playbooks/roles/edx_service/templates/config.yml.j2 b/playbooks/roles/edx_service/templates/config.yml.j2 index 9bf761203cf..93d89041fd5 100644 --- a/playbooks/roles/edx_service/templates/config.yml.j2 +++ b/playbooks/roles/edx_service/templates/config.yml.j2 @@ -2,7 +2,7 @@ # {{ ansible_managed }} {% if edx_service_config_filter_nones -%} - {% for key, value in edx_service_config.copy().iteritems() -%} + {% for key, value in edx_service_config.copy().items() -%} {% if value is none -%} {% do edx_service_config.pop(key) %} {%- endif %} diff --git a/playbooks/roles/edx_service_with_rendered_config/defaults/main.yml b/playbooks/roles/edx_service_with_rendered_config/defaults/main.yml new file mode 100644 index 00000000000..3ba7d4a3626 --- /dev/null +++ b/playbooks/roles/edx_service_with_rendered_config/defaults/main.yml @@ -0,0 +1,37 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: 
https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role edx_service_with_rendered_config +# + +# +# vars are namespaced with the module name. +# +edx_service_with_rendered_config_service_name: edx_service_with_rendered_config + +edx_service_with_rendered_config_repos: [] + +# A few roles meta this role but don't need a config file written +# this allows them to not pass a config and the tasks will skip +# and not write out a config at all. +edx_service_with_rendered_config_service_config: {} + +# If you would like edx_service_with_rendered_config to strip out !!null settings before writing out +# the yaml config, set this to true. +edx_service_with_rendered_config_filter_nones: false + +# +# OS packages +# +edx_service_with_rendered_config_packages: + debian: [] + redhat: [] + +edx_service_with_rendered_config_local_config_file: "{{ UNENCRYPTED_CFG_DIR }}/{{ edx_service_with_rendered_config_service_name }}.yml" diff --git a/playbooks/roles/edx_service_with_rendered_config/meta/main.yml b/playbooks/roles/edx_service_with_rendered_config/meta/main.yml new file mode 100644 index 00000000000..1b8767683e5 --- /dev/null +++ b/playbooks/roles/edx_service_with_rendered_config/meta/main.yml @@ -0,0 +1,29 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role edx_service_with_rendered_config +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } + +dependencies: + - role: edx_service + edx_service_name: "{{ edx_service_with_rendered_config_service_name }}" + 
edx_service_config: "{{ edx_service_with_rendered_config_service_config }}" + edx_service_user: "{{ edx_service_with_rendered_config_user }}" + edx_service_home: "{{ edx_service_with_rendered_config_home }}" + edx_service_packages: "{{ edx_service_with_rendered_config_packages }}" + edx_service_repos: "{{ edx_service_with_rendered_config_repos }}" diff --git a/playbooks/roles/edx_service_with_rendered_config/tasks/main.yml b/playbooks/roles/edx_service_with_rendered_config/tasks/main.yml new file mode 100644 index 00000000000..75ee2a74af8 --- /dev/null +++ b/playbooks/roles/edx_service_with_rendered_config/tasks/main.yml @@ -0,0 +1,56 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration
# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role edx_service_with_rendered_config +# +# Overview: +# +# This role performs the repetitive tasks that most edX roles +# require in our default configuration. +# +# Example play: +# +# Rather than being included in the play, this role +# is included as a dependency by other roles in the meta/main.yml +# file. The including role should add the following +# dependency definition. +# +# dependencies: +# - role: edx_service_with_rendered_config +# edx_service_with_rendered_config_service_name: "hotg" +# edx_service_with_rendered_config_service_config: "{{ structure_to_be_written_to_config_file_in_/edx/etc }}" +# edx_service_with_rendered_config_repos: +# - PROTOCOL: [https/ssh] +# DOMAIN: github.com +# PATH: edx +# REPO: hotg +# VERSION: master +# DESTINATION: "/edx/app/hotg/hotg" +# SSH_KEY: +# - PROTOCOL +# ... 
+# edx_service_with_rendered_config_service_name: hotg_system_user +# edx_service_with_rendered_config_home: "/edx/app/hotg" +# edx_service_with_rendered_config_packages: +# debian: [ pkg1, pkg2, pkg3 ] +# redhat: [ pkg4, pkg5 ] +# + +- name: Write out app config file + template: + src: "config.yml.j2" + dest: "{{ COMMON_CFG_DIR }}/{{ edx_service_with_rendered_config_service_name }}.yml" + mode: "0644" + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + tags: + - install + - install:configuration + - install:app-configuration diff --git a/playbooks/roles/edx_service_with_rendered_config/templates/config.yml.j2 b/playbooks/roles/edx_service_with_rendered_config/templates/config.yml.j2 new file mode 100644 index 00000000000..a3e80ff6f5d --- /dev/null +++ b/playbooks/roles/edx_service_with_rendered_config/templates/config.yml.j2 @@ -0,0 +1,12 @@ +--- +# {{ ansible_managed }} + +{% if edx_service_with_rendered_config_filter_nones -%} + {% for key, value in edx_service_with_rendered_config_service_config.copy().items() -%} + {% if value is none -%} + {% do edx_service_with_rendered_config_service_config.pop(key) %} + {%- endif %} + {%- endfor %} +{%- endif %} + +{{ edx_service_with_rendered_config_service_config | to_nice_yaml }} diff --git a/playbooks/roles/edxapp/defaults/main.yml b/playbooks/roles/edxapp/defaults/main.yml index 2fac19744d3..42cbed20469 100644 --- a/playbooks/roles/edxapp/defaults/main.yml +++ b/playbooks/roles/edxapp/defaults/main.yml @@ -12,8 +12,8 @@ # Defaults specified here should not contain # any secrets or host identifying information. # -# Variables set to "None" will be converted to None -# when the edxapp config is written to disk. 
+ +EDXAPP_PYTHON_VERSION: "python3.5" #stub for appsembler specific vals EDXAPP_APPSEMBLER_FEATURES: { @@ -25,8 +25,8 @@ EDXAPP_APPSEMBLER_FEATURES: { EDXAPP_TAHOE_REBUILD_SITE_STYLES: false # Bucket used for xblock file storage -EDXAPP_XBLOCK_FS_STORAGE_BUCKET: "None" -EDXAPP_XBLOCK_FS_STORAGE_PREFIX: "None" +EDXAPP_XBLOCK_FS_STORAGE_BUCKET: !!null +EDXAPP_XBLOCK_FS_STORAGE_PREFIX: !!null EDXAPP_DJFS: type: 'osfs' directory_root: '{{ edxapp_data_dir }}/django-pyfs/static/django-pyfs' @@ -51,8 +51,8 @@ EDXAPP_LMS_MAX_REQ: 1000 EDXAPP_CMS_MAX_REQ: 1000 # 'None' will be written out as null in # the configuration on disk -EDXAPP_AWS_ACCESS_KEY_ID: "None" -EDXAPP_AWS_SECRET_ACCESS_KEY: "None" +EDXAPP_AWS_ACCESS_KEY_ID: !!null +EDXAPP_AWS_SECRET_ACCESS_KEY: !!null EDXAPP_AWS_QUERYSTRING_AUTH: false EDXAPP_AWS_STORAGE_BUCKET_NAME: "SET-ME-PLEASE (ex. bucket-name)" # An empty string makes the course import/export functionality to use the @@ -60,21 +60,21 @@ EDXAPP_AWS_STORAGE_BUCKET_NAME: "SET-ME-PLEASE (ex. bucket-name)" EDXAPP_IMPORT_EXPORT_BUCKET: "" EDXAPP_AWS_S3_CUSTOM_DOMAIN: "SET-ME-PLEASE (ex. 
bucket-name.s3.amazonaws.com)" EDXAPP_AWS_S3_REGION_NAME: "" -EDXAPP_SWIFT_USERNAME: "None" -EDXAPP_SWIFT_KEY: "None" -EDXAPP_SWIFT_TENANT_ID: "None" -EDXAPP_SWIFT_TENANT_NAME: "None" -EDXAPP_SWIFT_AUTH_URL: "None" -EDXAPP_SWIFT_AUTH_VERSION: "None" -EDXAPP_SWIFT_REGION_NAME: "None" +EDXAPP_SWIFT_USERNAME: !!null +EDXAPP_SWIFT_KEY: !!null +EDXAPP_SWIFT_TENANT_ID: !!null +EDXAPP_SWIFT_TENANT_NAME: !!null +EDXAPP_SWIFT_AUTH_URL: !!null +EDXAPP_SWIFT_AUTH_VERSION: !!null +EDXAPP_SWIFT_REGION_NAME: !!null EDXAPP_SWIFT_USE_TEMP_URLS: false -EDXAPP_SWIFT_TEMP_URL_KEY: "None" +EDXAPP_SWIFT_TEMP_URL_KEY: !!null EDXAPP_SWIFT_TEMP_URL_DURATION: 1800 # seconds EDXAPP_DEFAULT_FILE_STORAGE: "django.core.files.storage.FileSystemStorage" EDXAPP_XQUEUE_BASIC_AUTH: [ "{{ COMMON_HTPASSWD_USER }}", "{{ COMMON_HTPASSWD_PASS }}" ] EDXAPP_XQUEUE_DJANGO_AUTH: username: 'lms' - password: 'password' + password: "{{ COMMON_XQUEUE_LMS_PASSWORD }}" EDXAPP_XQUEUE_URL: 'http://localhost:18040' # Comma-separated list of hosts/ips @@ -97,6 +97,7 @@ EDXAPP_MONGO_SSL_CA_CERT_PATH: !!null # EDXAPP_MONGO_SSL_CA_CERT_PATH: "{{ EDXAPP_CFG_DIR }}/ca.pem" EDXAPP_MONGO_REPLICA_SET: '' +EDXAPP_MONGO_AUTH_DB: '' # Used only if EDXAPP_MONGO_REPLICA_SET is provided. 
EDXAPP_MONGO_CMS_READ_PREFERENCE: 'PRIMARY' EDXAPP_MONGO_LMS_READ_PREFERENCE: 'SECONDARY_PREFERRED' @@ -163,6 +164,7 @@ EDXAPP_LOG_LEVEL: 'INFO' EDXAPP_MEMCACHE: [ 'localhost:11211' ] EDXAPP_CACHE_COURSE_STRUCTURE_MEMCACHE: "{{ EDXAPP_MEMCACHE }}" +EDXAPP_CACHE_BACKEND: 'django.core.cache.backends.memcached.MemcachedCache' EDXAPP_COMMENTS_SERVICE_URL: 'http://localhost:18080' # EDXAPP_COMMENTS_SERVICE_KEY must match FORUM_API_KEY EDXAPP_COMMENTS_SERVICE_KEY: 'password' @@ -182,13 +184,15 @@ EDXAPP_ZENDESK_URL: "" EDXAPP_ZENDESK_API_KEY: "" EDXAPP_ZENDESK_CUSTOM_FIELDS: {} EDXAPP_ZENDESK_OAUTH_ACCESS_TOKEN: "" +EDXAPP_ZENDESK_GROUP_ID_MAPPING: {} EDXAPP_CELERY_USER: 'celery' EDXAPP_CELERY_PASSWORD: 'celery' EDXAPP_CELERY_BROKER_HOSTNAME: "{{ EDXAPP_RABBIT_HOSTNAME }}" EDXAPP_CELERY_BROKER_TRANSPORT: 'amqp' EDXAPP_CELERY_BROKER_VHOST: "" EDXAPP_CELERY_BROKER_USE_SSL: false -EDXAPP_CELERY_EVENT_QUEUE_TTL: "None" +EDXAPP_CELERY_EVENT_QUEUE_TTL: !!null +EDXAPP_CELERY_TIMEZONE: "UTC" EDXAPP_BRANCH_IO_KEY: "" EDXAPP_CELERY_BEAT_LMS_ENABLED: false @@ -299,7 +303,7 @@ EDXAPP_ACE_ENABLED_CHANNELS: ['django_email'] EDXAPP_ACE_ENABLED_POLICIES: ['bulk_email_optout'] EDXAPP_ACE_CHANNEL_SAILTHRU_DEBUG: True EDXAPP_ACE_CHANNEL_SAILTHRU_TEMPLATE_NAME: !!null -EDXAPP_ACE_ROUTING_KEY: 'edx.lms.core.low' +EDXAPP_ACE_ROUTING_KEY: 'edx.lms.core.default' EDXAPP_ACE_CHANNEL_DEFAULT_EMAIL: 'django_email' EDXAPP_ACE_CHANNEL_TRANSACTIONAL_EMAIL: 'django_email' @@ -309,6 +313,12 @@ EDXAPP_SHOW_HEADER_LANGUAGE_SELECTOR: false # Display a language selector in the LMS footer. 
EDXAPP_SHOW_FOOTER_LANGUAGE_SELECTOR: false +# Configure x_frame_options in LMS/CMS +EDXAPP_X_FRAME_OPTIONS: "DENY" + +# Reset functionality for master's integration environments +EDXAPP_ENABLE_ENROLLMENT_RESET: false + EDXAPP_FEATURES: AUTH_USE_OPENID_PROVIDER: true ENABLE_DISCUSSION_SERVICE: true @@ -316,6 +326,7 @@ EDXAPP_FEATURES: PREVIEW_LMS_BASE: "{{ EDXAPP_PREVIEW_LMS_BASE }}" ENABLE_GRADE_DOWNLOADS: true ENABLE_MKTG_SITE: "{{ EDXAPP_ENABLE_MKTG_SITE }}" + ENABLE_PUBLISHER: "{{ EDXAPP_ENABLE_PUBLISHER }}" AUTOMATIC_AUTH_FOR_TESTING: "{{ EDXAPP_ENABLE_AUTO_AUTH }}" ENABLE_THIRD_PARTY_AUTH: "{{ EDXAPP_ENABLE_THIRD_PARTY_AUTH }}" ENABLE_VIDEO_UPLOAD_PIPELINE: false @@ -336,6 +347,7 @@ EDXAPP_FEATURES: ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES: "{{ EDXAPP_ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES }}" SHOW_HEADER_LANGUAGE_SELECTOR: "{{ EDXAPP_SHOW_HEADER_LANGUAGE_SELECTOR }}" SHOW_FOOTER_LANGUAGE_SELECTOR: "{{ EDXAPP_SHOW_FOOTER_LANGUAGE_SELECTOR }}" + ENABLE_ENROLLMENT_RESET: false # Enable wiki by default EDXAPP_WIKI_ENABLED: true @@ -345,6 +357,7 @@ EDXAPP_WIKI_ENABLED: true # as the lms (it's sent in the request) EDXAPP_SITE_NAME: 'localhost' EDXAPP_LMS_SITE_NAME: "{{ EDXAPP_SITE_NAME }}" +EDXAPP_LMS_HTTPS: 'on' EDXAPP_CMS_SITE_NAME: 'localhost' EDXAPP_MEDIA_URL: "/media" EDXAPP_FEEDBACK_SUBMISSION_EMAIL: "" @@ -400,10 +413,19 @@ EDXAPP_UNIVERSITY_EMAIL: 'university@example.com' EDXAPP_PRESS_EMAIL: 'press@example.com' EDXAPP_LMS_ROOT_URL: "{{ EDXAPP_LMS_BASE_SCHEME | default('https') }}://{{ EDXAPP_LMS_BASE }}" EDXAPP_LMS_INTERNAL_ROOT_URL: "{{ EDXAPP_LMS_ROOT_URL }}" + EDXAPP_LMS_ISSUER: "{{ COMMON_JWT_ISSUER }}" EDXAPP_JWT_EXPIRATION: 30 # Number of seconds until expiration EDXAPP_JWT_AUDIENCE: "{{ COMMON_JWT_AUDIENCE }}" EDXAPP_JWT_SECRET_KEY: "{{ COMMON_JWT_SECRET_KEY }}" +EDXAPP_JWT_PUBLIC_SIGNING_JWK_SET: "{{ COMMON_JWT_PUBLIC_SIGNING_JWK_SET|string }}" +EDXAPP_JWT_AUTH_COOKIE_HEADER_PAYLOAD: '{{ COMMON_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}' 
+EDXAPP_JWT_AUTH_COOKIE_SIGNATURE: '{{ COMMON_JWT_AUTH_COOKIE_SIGNATURE }}' +EDXAPP_JWT_AUTH_REFRESH_COOKIE: '{{ COMMON_JWT_AUTH_REFRESH_COOKIE }}' + +# See https://github.com/edx/edx-platform/blob/master/openedx/core/djangoapps/oauth_dispatch/docs/decisions/0008-use-asymmetric-jwts.rst +EDXAPP_JWT_SIGNING_ALGORITHM: !!null +EDXAPP_JWT_PRIVATE_SIGNING_JWK: !!null EDXAPP_PLATFORM_TWITTER_ACCOUNT: '@YourPlatformTwitterAccount' EDXAPP_PLATFORM_FACEBOOK_ACCOUNT: 'http://www.facebook.com/YourPlatformFacebookAccount' @@ -424,6 +446,7 @@ EDXAPP_CMS_ENV_EXTRA: "{{ EDXAPP_ENV_EXTRA }}" EDXAPP_LMS_AUTH_EXTRA: "{{ EDXAPP_AUTH_EXTRA }}" EDXAPP_CMS_AUTH_EXTRA: "{{ EDXAPP_AUTH_EXTRA }}" EDXAPP_ENABLE_MKTG_SITE: false +EDXAPP_ENABLE_PUBLISHER: false EDXAPP_MKTG_URL_LINK_MAP: {} EDXAPP_MKTG_URLS: {} EDXAPP_SUPPORT_SITE_LINK: '' @@ -482,13 +505,13 @@ EDXAPP_SANDBOX_ENFORCE: true # over SSH and the fully qualified command that they can run via sudo to the # application user -EDXAPP_AUTOMATED_USERS: - automated_user: - sudo_commands: - - command: "{{ edxapp_venv_bin }}/python {{ edxapp_code_dir }}/manage.py lms showmigrations --settings={{ edxapp_settings }}" - sudo_user: "edxapp" - authorized_keys: - - "SSH authorized key" +EDXAPP_AUTOMATED_USERS: {} + # automated_user: + # sudo_commands: + # - command: "{{ edxapp_venv_bin }}/python {{ edxapp_code_dir }}/manage.py lms showmigrations --settings={{ edxapp_settings }}" + # sudo_user: "edxapp" + # authorized_keys: + # - "SSH authorized key" EDXAPP_USE_GIT_IDENTITY: false # Paste the contents of the git identity @@ -523,33 +546,54 @@ EDXAPP_EXTRA_REQUIREMENTS: [] # - name: git+https://git.myproject.org/MyProject#egg=MyProject EDXAPP_PRIVATE_REQUIREMENTS: # For Harvard courses: - - name: xblock-problem-builder==3.1.3 + - name: xblock-problem-builder==4.0.0 # Oppia XBlock - - name: git+https://github.com/oppia/xblock.git@9f6b95b7eb7dbabb96b77198a3202604f96adf65#egg=oppia-xblock - extra_args: -e - # Microsoft's Office Mix XBlock - - name: 
git+https://github.com/OfficeDev/xblock-officemix.git@3f876b5f0267b017812620239533a29c7d562d24#egg=officemix + # https://github.com/oppia/xblock/pull/4 + - name: git+https://github.com/edx/oppia-xblock.git@1030adb3590ad2d32c93443cc8690db0985d76b6#egg=oppia-xblock extra_args: -e # This repository contains schoolyourself-xblock, which is used in # edX's "AlgebraX" and "GeometryX" courses. - - name: git+https://github.com/schoolyourself/schoolyourself-xblock.git@5e4d37716e3e72640e832e961f7cc0d38d4ec47b#egg=schoolyourself-xblock + - name: git+https://github.com/edx/schoolyourself-xblock.git@c0c980be0a0fd00a653afc80d2cfd147f8f8987d#egg=schoolyourself-xblock extra_args: -e # Prototype XBlocks from edX learning sciences limited roll-outs and user testing. # Concept XBlock, in particular, is nowhere near finished and an early prototype. # Profile XBlock is there so we can play with XBlock arguments in the platform, but isn't ready for use outside of # edX. - - name: git+https://github.com/edx/ConceptXBlock.git@2376fde9ebdd83684b78dde77ef96361c3bd1aa0#egg=concept-xblock + - name: git+https://github.com/edx/ConceptXBlock.git@a45a6560c92b6d8b62be1f939ff1d00dfff84e70#egg=concept-xblock extra_args: -e - - name: git+https://github.com/edx/AudioXBlock.git@1fbf19cc21613aead62799469e1593adb037fdd9#egg=audio-xblock + - name: git+https://github.com/edx/AudioXBlock.git@20538c6e9bb704801a71ecbb6981f794556dfc45#egg=audio-xblock extra_args: -e - - name: git+https://github.com/edx/AnimationXBlock.git@d2b551bb8f49a138088e10298576102164145b87#egg=animation-xblock + - name: git+https://github.com/edx/AnimationXBlock.git@c950ffdda2f69effda93bf03df8646f61d3ffada#egg=animation-xblock extra_args: -e # Peer instruction XBlock - - name: ubcpi-xblock==0.6.4 + # Need it from github until we can land https://github.com/ubc/ubcpi/pull/167 upstream. 
+ - name: git+https://github.com/edx/ubcpi.git@7b7a54ef4b99614f749128d3a1f47d31c143e25b#egg=ubcpi-xblock + extra_args: -e # Vector Drawing and ActiveTable XBlocks (Davidson) - - name: git+https://github.com/open-craft/xblock-vectordraw.git@c57df9d98119fd2ca4cb31b9d16c27333cdc65ca#egg=xblock-vectordraw==0.2.1 + - name: git+https://github.com/open-craft/xblock-vectordraw.git@76976425356dfc7f13570f354c0c438db84c2840#egg=xblock-vectordraw==0.3.0 + extra_args: -e + - name: git+https://github.com/open-craft/xblock-activetable.git@013003aa3ce28f0ae03b8227dc3a6daa4e19997d#egg=xblock-activetable + extra_args: -e + - name: git+https://github.com/edx/edx-zoom.git@37c323ae93265937bf60abb92657318efeec96c5#egg=edx-zoom + extra_args: -e + # Stanford-developed XBlocks (technically unsupported, but here to ease migration of courses from Lagunita) + - name: git+https://github.com/edx/xblock-qualtrics-survey.git@b46c55d26bee615ff76051685277c45f762f9495#egg=xblock_qualtrics_survey + extra_args: -e + - name: git+https://github.com/edx/xblock-in-video-quiz.git@c1cc11f87285cd885d76604145433dae87434a6d#egg=invideoquiz-xblock + extra_args: -e + - name: git+https://github.com/edx/xblock-submit-and-compare@338cb3dd3c4ff5c50d509f34baaaf451255fdbc3#egg=xblock-submit-and-compare + extra_args: -e + - name: git+https://github.com/edx/xblock-free-text-response@cc85ca0d4aaab26c0362667c207d99dd0622878a#egg=xblock-free-text-response + extra_args: -e + - name: git+https://github.com/edx/xblock-sql-grader@acf6c20157616c668ee8a1ca13a0b457ec79a60c#egg=xblock-sql-grader + extra_args: -e + - name: git+https://github.com/edx/xblock-image-modal@9c19e426ea6118c1433e29cfccc1a88a2346510d#egg=xblock-image-modal extra_args: -e - - name: git+https://github.com/open-craft/xblock-activetable.git@e933d41bb86a8d50fb878787ca680165a092a6d5#egg=xblock-activetable + # XBlocks associated with the LabXchange project + - name: 
git+https://github.com/open-craft/labxchange-xblocks.git@3830256088845c23eedaf6bc8c5b29c0ebc46fbb#egg=labxchange-xblocks + extra_args: -e + # "Pathways" learning context plugin for the LabXchange project + - name: git+https://github.com/open-craft/lx-pathway-plugin.git@337abf249b7c5ecc1e78a44d2e639e1ab65f2085#egg=lx-pathway-plugin extra_args: -e # List of custom middlewares that should be used in edxapp to process @@ -557,15 +601,15 @@ EDXAPP_PRIVATE_REQUIREMENTS: # qualify Python classes or functions that can be used as Django middleware. EDXAPP_EXTRA_MIDDLEWARE_CLASSES: [] -EDXAPP_GOOGLE_ANALYTICS_ACCOUNT: "None" +EDXAPP_GOOGLE_ANALYTICS_ACCOUNT: !!null EDXAPP_GOOGLE_ANALYTICS_LINKEDIN: "" EDXAPP_GOOGLE_ANALYTICS_TRACKING_ID: "" EDXAPP_GOOGLE_SITE_VERIFICATION_ID: "" -EDXAPP_OPTIMIZELY_PROJECT_ID: "None" +EDXAPP_OPTIMIZELY_PROJECT_ID: !!null EDXAPP_TRACKING_SEGMENTIO_WEBHOOK_SECRET: "" -EDXAPP_CMS_SEGMENT_KEY: "None" -EDXAPP_LMS_SEGMENT_KEY: "None" +EDXAPP_CMS_SEGMENT_KEY: !!null +EDXAPP_LMS_SEGMENT_KEY: !!null EDXAPP_ENABLE_SEGMENT_SITE: False EDXAPP_EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST: [] @@ -597,6 +641,9 @@ EDXAPP_XML_S3_KEY: !!null EDXAPP_NEWRELIC_LMS_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-edxapp-lms" EDXAPP_NEWRELIC_CMS_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-edxapp-cms" EDXAPP_NEWRELIC_WORKERS_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-edxapp-workers" +EDXAPP_LMS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false +EDXAPP_CMS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false +EDXAPP_WORKERS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false EDXAPP_ORA2_FILE_PREFIX: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/ora2' EDXAPP_FILE_UPLOAD_STORAGE_BUCKET_NAME: '{{ EDXAPP_AWS_STORAGE_BUCKET_NAME }}' @@ -635,11 +682,6 @@ EDXAPP_REGISTRATION_EXTRA_FIELDS: country: "required" EDXAPP_CELERY_WORKERS: - - queue: low - service_variant: cms - concurrency: 1 - monitor: True - prefetch_optimization: default - queue: default 
service_variant: cms concurrency: 1 @@ -650,11 +692,6 @@ EDXAPP_CELERY_WORKERS: concurrency: 1 monitor: True prefetch_optimization: default - - queue: low - service_variant: lms - concurrency: 1 - monitor: True - prefetch_optimization: default - queue: default service_variant: lms concurrency: 1 @@ -673,7 +710,8 @@ EDXAPP_CELERY_WORKERS: prefetch_optimization: default EDXAPP_RECALCULATE_GRADES_ROUTING_KEY: 'edx.lms.core.default' EDXAPP_POLICY_CHANGE_GRADES_ROUTING_KEY: 'edx.lms.core.default' -EDXAPP_BULK_EMAIL_ROUTING_KEY_SMALL_JOBS: 'edx.lms.core.low' +EDXAPP_BULK_EMAIL_ROUTING_KEY_SMALL_JOBS: 'edx.lms.core.default' +EDXAPP_PROGRAM_CERTIFICATES_ROUTING_KEY: 'edx.lms.core.default' EDXAPP_LMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'lms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.lms.core.\\1')|list }}" EDXAPP_CMS_CELERY_QUEUES: "{{ edxapp_workers|selectattr('service_variant', 'equalto', 'cms')|map(attribute='queue')|map('regex_replace', '(.*)', 'edx.cms.core.\\1')|list }}" @@ -699,6 +737,14 @@ EDXAPP_CORS_ORIGIN_ALLOW_ALL: false EDXAPP_CROSS_DOMAIN_CSRF_COOKIE_DOMAIN: "" EDXAPP_CROSS_DOMAIN_CSRF_COOKIE_NAME: "" EDXAPP_CSRF_COOKIE_SECURE: false +EDXAPP_CSRF_TRUSTED_ORIGINS: [] +EDXAPP_LOGIN_REDIRECT_WHITELIST: [] + +# edx-rbac Related Settings +EDXAPP_SYSTEM_WIDE_ROLE_CLASSES: [] + +# Setting for enterprise marketing footer query params +EDXAPP_ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS: {} # E-Commerce Related Settings EDXAPP_ECOMMERCE_PUBLIC_URL_ROOT: "http://localhost:8002" @@ -709,9 +755,14 @@ EDXAPP_COURSE_CATALOG_API_URL: "http://localhost:8008/api/v1" EDXAPP_CREDENTIALS_INTERNAL_SERVICE_URL: "http://localhost:8005" EDXAPP_CREDENTIALS_PUBLIC_SERVICE_URL: "http://localhost:8005" -# Journals Related Settings -EDXAPP_JOURNALS_URL_ROOT: "https://journals-{{ EDXAPP_LMS_BASE }}" -EDXAPP_JOURNALS_API_URL: "https://journals-{{ EDXAPP_LMS_BASE }}/api/v1/" +# Learner portal settings +EDXAPP_LEARNER_PORTAL_URL_ROOT: 
"https://learner-portal-{{ EDXAPP_LMS_BASE }}" +# Blockstore Related Settings +EDXAPP_BLOCKSTORE_PUBLIC_URL_ROOT: "http://localhost:18250" +EDXAPP_BLOCKSTORE_API_URL: "http://localhost:18250/api/v1" + +# List of all logout URIs for IDAs which have been converted from using DOP to using DOT. +EDXAPP_IDA_LOGOUT_URI_LIST: [] # which access.py permission name to check in order to determine if a course about page is # visible. We default this to the legacy permission 'see_exists'. @@ -736,7 +787,7 @@ EDXAPP_SOCIAL_SHARING_SETTINGS: # EDXAPP_PROFILE_IMAGE_BACKEND_CONFIG: # class: storages.backends.s3boto.S3BotoStorage # options: -# location: /path/to/images +# location: path/to/images # Note: The location should not begin with a leading slash. # bucket: mybucket # custom_domain: mybucket.s3.amazonaws.com # access_key: XXXAWS_ACCESS_KEYXXX @@ -747,7 +798,7 @@ EDXAPP_SOCIAL_SHARING_SETTINGS: #NB2: custom_domain is REQUIRED. Otherwise, boto will generate a # temporary URL whenever asked for the URL of a specific file. 
EDXAPP_PROFILE_IMAGE_BACKEND: - class: storages.backends.overwrite.OverwriteStorage + class: openedx.core.storage.OverwriteStorage options: location: "{{ edxapp_media_dir }}/profile-images/" base_url: "{{ EDXAPP_MEDIA_URL }}/profile-images/" @@ -786,17 +837,21 @@ EDXAPP_CREDIT_PROVIDER_SECRET_KEYS: {} # Proctoring configuration (redirct URLs and keys shared between systems) EDXAPP_PROCTORING_SETTINGS: {} -EDXAPP_PROCTORING_BACKEND_PROVIDER: - class: 'edx_proctoring.backends.null.NullBackendProvider' - options: {} - +EDXAPP_PROCTORING_BACKENDS: + DEFAULT: "null" + # The null key needs to be quoted because + # null is a language independent type in YAML + "null": {} + +# Configuration needed for the retirement service +EDXAPP_RETIREMENT_SERVICE_USER_EMAIL: "retirement_worker@example.com" +EDXAPP_RETIREMENT_SERVICE_USER_NAME: "retirement_worker" EDXAPP_RETIRED_USERNAME_PREFIX: "retired__user_" EDXAPP_RETIRED_EMAIL_PREFIX: "retired__user_" EDXAPP_RETIRED_EMAIL_DOMAIN: "retired.invalid" EDXAPP_RETIRED_USER_SALTS: - "OVERRIDE ME WITH A RANDOM VALUE" - "ROTATE SALTS BY APPENDING NEW VALUES" -EDXAPP_RETIREMENT_SERVICE_WORKER_USERNAME: "OVERRIDE THIS WITH A VALID LMS USERNAME" # These get loaded into database models per environment via management command # These are the required states, environmental overrides are in edx-internal. EDXAPP_RETIREMENT_STATES: @@ -805,6 +860,8 @@ EDXAPP_RETIREMENT_STATES: - "ABORTED" - "COMPLETE" +EDXAPP_USERNAME_REPLACEMENT_WORKER: "OVERRIDE THIS WITH A VALID USERNAME" + # Comprehensive Theming # Deprecated, maintained for backward compatibility EDXAPP_COMPREHENSIVE_THEME_DIR: "" @@ -864,6 +921,11 @@ EDXAPP_CLEARSESSIONS_CRON_ENABLED: false EDXAPP_CLEARSESSIONS_CRON_HOURS: "14" EDXAPP_CLEARSESSIONS_CRON_MINUTES: "0" +# Add additional cron jobs from the given list. 
+# See ansible docs for valid options for these items: +# https://docs.ansible.com/ansible/latest/modules/cron_module.html +EDXAPP_ADDITIONAL_CRON_JOBS: [] + EDXAPP_VIDEO_IMAGE_MAX_AGE: 31536000 # This is django storage configuration for Video Image settings. @@ -872,7 +934,7 @@ EDXAPP_VIDEO_IMAGE_SETTINGS: VIDEO_IMAGE_MAX_BYTES : 2097152 VIDEO_IMAGE_MIN_BYTES : 2048 STORAGE_KWARGS: - location: "{{ edxapp_media_dir }}/" + location: "{{ edxapp_media_dir_s3 }}/" base_url: "{{ EDXAPP_MEDIA_URL }}/" DIRECTORY_PREFIX: 'video-images/' @@ -882,7 +944,7 @@ EDXAPP_VIDEO_TRANSCRIPTS_MAX_AGE: 31536000 EDXAPP_VIDEO_TRANSCRIPTS_SETTINGS: VIDEO_TRANSCRIPTS_MAX_BYTES : 3145728 STORAGE_KWARGS: - location: "{{ edxapp_media_dir }}/" + location: "{{ edxapp_media_dir_s3 }}/" base_url: "{{ EDXAPP_MEDIA_URL }}/" DIRECTORY_PREFIX: 'video-transcripts/' @@ -903,10 +965,6 @@ EDXAPP_BLOCK_STRUCTURES_SETTINGS: PRUNING_ACTIVE: false -# Configuration needed for LMS to communicate with the Journals service -JOURNALS_SERVICE_USER_EMAIL: "journals_worker@example.com" -JOURNALS_SERVICE_USER_NAME: "journals_worker" - # Configuration needed for LMS to communicate with the Discovery service DISCOVERY_SERVICE_USER_EMAIL: "discovery_worker@example.com" DISCOVERY_SERVICE_USER_NAME: "discovery_worker" @@ -923,6 +981,14 @@ CREDENTIALS_SERVICE_USER_NAME: "credentials_worker" INSIGHTS_SERVICE_USER_EMAIL: "insights_worker@example.com" INSIGHTS_SERVICE_USER_NAME: "insights_worker" +# Configuration needed for LMS to communicate with the Registrar service +REGISTRAR_SERVICE_USER_EMAIL: "registrar_worker@example.com" +REGISTRAR_SERVICE_USER_NAME: "registrar_worker" + +# Configuration needed for LMS to communicate with the Designer service +DESIGNER_SERVICE_USER_EMAIL: "designer_worker@example.com" +DESIGNER_SERVICE_USER_NAME: "designer_worker" + # Configuration settings needed for the LMS to communicate with the Enterprise service.
EDXAPP_ENTERPRISE_API_URL: "{{ EDXAPP_LMS_INTERNAL_ROOT_URL }}/enterprise/api/v1" @@ -939,10 +1005,10 @@ EDXAPP_ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES: - audit - honor -EDXAPP_ENTERPRISE_ENROLLMENT_API_URL: "{{ EDXAPP_LMS_INTERNAL_ROOT_URL }}/api/enrollment/v1/" +EDXAPP_ENTERPRISE_CUSTOMER_SUCCESS_EMAIL: "customersuccess@edx.org" +EDXAPP_ENTERPRISE_INTEGRATIONS_EMAIL: "enterprise-integrations@edx.org" -# The default value of this needs to be a 16 character string -EDXAPP_ENTERPRISE_REPORTING_SECRET: '0000000000000000' +EDXAPP_ENTERPRISE_ENROLLMENT_API_URL: "{{ EDXAPP_LMS_INTERNAL_ROOT_URL }}/api/enrollment/v1/" EDXAPP_ENTERPRISE_SUPPORT_URL: '' @@ -950,14 +1016,20 @@ EDXAPP_ENTERPRISE_TAGLINE: '' # The assigned ICP license number for display in the platform footer EDXAPP_ICP_LICENSE: !!null +EDXAPP_ICP_LICENSE_INFO: {} # Base Cookie Domain to share cookie across edx domains EDXAPP_BASE_COOKIE_DOMAIN: "{{ EDXAPP_LMS_SITE_NAME }}" # Account password configuration -EDXAPP_PASSWORD_MIN_LENGTH: 2 -EDXAPP_PASSWORD_MAX_LENGTH: 75 -EDXAPP_PASSWORD_COMPLEXITY: {} +EDXAPP_AUTH_PASSWORD_VALIDATORS: + - NAME: 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' + - NAME: 'util.password_policy_validators.MinimumLengthValidator' + OPTIONS: + min_length: 2 + - NAME: 'util.password_policy_validators.MaximumLengthValidator' + OPTIONS: + max_length: 75 # The age at which a learner no longer requires parental consent, or None EDXAPP_PARENTAL_CONSENT_AGE_LIMIT: 13 @@ -972,6 +1044,33 @@ EDXAPP_MAINTENANCE_BANNER_TEXT: "Sample banner message" EDXAPP_PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG: ENFORCE_COMPLIANCE_ON_LOGIN: false +# Needed to link the LMS instructor dashboard to the writable gradebook micro-frontend +EDXAPP_LMS_WRITABLE_GRADEBOOK_URL: null + +# Needed to link to the new profile micro-frontend. +EDXAPP_PROFILE_MICROFRONTEND_URL: null + +# Needed to link to the new order history micro-frontend. 
+EDXAPP_ORDER_HISTORY_MICROFRONTEND_URL: null + +# Needed to link to the new account micro-frontend. +EDXAPP_ACCOUNT_MICROFRONTEND_URL: null + +# Needed to link to the new program manager micro-frontend. +EDXAPP_PROGRAM_MANAGER_MICROFRONTEND_URL: null + +# Needed to link to the learning micro-frontend. +EDXAPP_LEARNING_MICROFRONTEND_URL: null + +# Remote config +EDXAPP_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +EDXAPP_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +EDXAPP_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +# Dashboard courses default limit +EDXAPP_DASHBOARD_COURSE_LIMIT: null + #-------- Everything below this line is internal to the role ------------ #Use YAML references (& and *) and hash merge <<: to factor out shared settings @@ -986,19 +1085,26 @@ edxapp_venv_bin: "{{ edxapp_venv_dir }}/bin" edxapp_nodeenv_dir: "{{ edxapp_app_dir }}/nodeenvs/edxapp" edxapp_nodeenv_bin: "{{ edxapp_nodeenv_dir }}/bin" edxapp_settings: '{{ EDXAPP_SETTINGS }}' -edxapp_node_version: "{{ common_node_version }}" +EDXAPP_NODE_VERSION: "12" +edxapp_node_version: "{{ EDXAPP_NODE_VERSION }}" # This is where node installs modules, not node itself edxapp_node_bin: "{{ edxapp_code_dir }}/node_modules/.bin" edxapp_user: edxapp +edxapp_user_createhome: 'no' +edxapp_user_shell: '/bin/false' edxapp_deploy_path: "{{ edxapp_venv_bin }}:{{ edxapp_code_dir }}/bin:{{ edxapp_node_bin }}:{{ edxapp_nodeenv_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" edxapp_staticfile_dir: "{{ edxapp_data_dir }}/staticfiles" edxapp_media_dir: "{{ edxapp_data_dir }}/media" +edxapp_media_dir_s3: "{{ edxapp_media_dir | regex_replace('^\\/', '') }}" edxapp_course_static_dir: "{{ edxapp_data_dir }}/course_static" edxapp_course_data_dir: "{{ edxapp_data_dir }}/data" edxapp_upload_dir: "{{ edxapp_data_dir }}/uploads" edxapp_theme_dir: "{{ edxapp_data_dir }}/themes" edxapp_git_identity: "{{ edxapp_app_dir }}/edxapp-git-identity" edxapp_git_ssh: 
"/tmp/edxapp_git_ssh.sh" +edxapp_studio_cfg: "{{ COMMON_CFG_DIR }}/studio.yml" +edxapp_lms_cfg: "{{ COMMON_CFG_DIR }}/lms.yml" +edxapp_revision_cfg: "{{ COMMON_CFG_DIR }}/revisions.yml" edxapp_devstack_logs: - "{{ supervisor_log_dir }}/cms-stdout.log" @@ -1015,6 +1121,7 @@ EDXAPP_WORKER_DEFAULT_STOPWAITSECS: 432000 # setup for python codejail edxapp_sandbox_venv_dir: '{{ edxapp_venvs_dir }}/edxapp-sandbox' edxapp_sandbox_user: 'sandbox' # I think something about the codejail requires hardcoding this to sandbox:sandbox +edxapp_sandbox_python_version: 'python3.5' # change to 'python2.7' if you want to go back to the old setting. # apparmor command edxapp_aa_command: "{% if EDXAPP_SANDBOX_ENFORCE %}aa-enforce{% else %}aa-complain{% endif %}" @@ -1039,11 +1146,14 @@ edxapp_environment_default: # be updated to /edx/etc/edxapp when the switch to # yaml based configs is complete CONFIG_ROOT: "{{ edxapp_app_dir }}" + LMS_CFG: "{{ edxapp_lms_cfg }}" + STUDIO_CFG: "{{ edxapp_studio_cfg }}" BOTO_CONFIG: "{{ edxapp_app_dir }}/.boto" # honeycomb config, if set needs to be made available # to gunicorn HONEYCOMB_WRITEKEY: "{{ honeycomb_writekey | default(None) }}" HONEYCOMB_DATASET: "{{ honeycomb_dataset | default(None) }}" + REVISION_CFG: "{{ edxapp_revision_cfg }}" edxapp_environment_extra: {} @@ -1064,7 +1174,7 @@ edxapp_generic_contentstore_config: &edxapp_generic_default_contentstore ssl: "{{ EDXAPP_MONGO_USE_SSL }}" ssl_certfile: "{{ EDXAPP_MONGO_SSL_CLIENT_CERT_PATH }}" ssl_ca_certs: "{{ EDXAPP_MONGO_SSL_CA_CERT_PATH }}" - authSource: "{{ EDXAPP_MONGO_AUTH_SOURCE }}" + auth_source: "{{ EDXAPP_MONGO_AUTH_DB }}" edxapp_generic_doc_store_config: &edxapp_generic_default_docstore db: "{{ EDXAPP_MONGO_DB_NAME }}" @@ -1081,7 +1191,7 @@ edxapp_generic_doc_store_config: &edxapp_generic_default_docstore socketTimeoutMS: 3000 # default is never timeout while the connection is open, this means it needs to explicitly close raising pymongo.errors.NetworkTimeout connectTimeoutMS: 2000 # 
default is 20000, I believe raises pymongo.errors.ConnectionFailure # Not setting waitQueueTimeoutMS and waitQueueMultiple since pymongo defaults to nobody being allowed to wait - authSource: "{{ EDXAPP_MONGO_AUTH_SOURCE }}" # Needed for mongo atlas + authsource: "{{ EDXAPP_MONGO_AUTH_DB }}" EDXAPP_LMS_DRAFT_DOC_STORE_CONFIG: <<: *edxapp_generic_default_docstore @@ -1159,8 +1269,10 @@ edxapp_generic_auth_config: &edxapp_generic_auth ZENDESK_USER: "{{ EDXAPP_ZENDESK_USER }}" ZENDESK_API_KEY: "{{ EDXAPP_ZENDESK_API_KEY }}" ZENDESK_OAUTH_ACCESS_TOKEN: "{{ EDXAPP_ZENDESK_OAUTH_ACCESS_TOKEN }}" + ZENDESK_GROUP_ID_MAPPING: "{{ EDXAPP_ZENDESK_GROUP_ID_MAPPING }}" CELERY_BROKER_USER: "{{ EDXAPP_CELERY_USER }}" CELERY_BROKER_PASSWORD: "{{ EDXAPP_CELERY_PASSWORD }}" + CELERY_TIMEZONE: "{{ EDXAPP_CELERY_TIMEZONE }}" GOOGLE_ANALYTICS_ACCOUNT: "{{ EDXAPP_GOOGLE_ANALYTICS_ACCOUNT }}" DJFS: "{{ EDXAPP_DJFS }}" CREDIT_PROVIDER_SECRET_KEYS: "{{ EDXAPP_CREDIT_PROVIDER_SECRET_KEYS }}" @@ -1173,18 +1285,24 @@ edxapp_generic_auth_config: &edxapp_generic_auth FACEBOOK_API_VERSION: "{{ EDXAPP_FACEBOOK_API_VERSION }}" ENTERPRISE_SERVICE_WORKER_USERNAME: "{{ EDXAPP_ENTERPRISE_SERVICE_WORKER_USERNAME }}" BRANCH_IO_KEY: "{{ EDXAPP_BRANCH_IO_KEY }}" + PROCTORING_BACKENDS: "{{ EDXAPP_PROCTORING_BACKENDS }}" generic_cache_config: &default_generic_cache - BACKEND: 'django.core.cache.backends.memcached.MemcachedCache' + BACKEND: "{{ EDXAPP_CACHE_BACKEND }}" KEY_FUNCTION: 'util.memcache.safe_key' +edxapp_revisions_config: + EDX_PLATFORM_REVISION: "{{ edx_platform_version }}" + generic_env_config: &edxapp_generic_env + IDA_LOGOUT_URI_LIST: "{{ EDXAPP_IDA_LOGOUT_URI_LIST }}" CREDENTIALS_INTERNAL_SERVICE_URL: "{{ EDXAPP_CREDENTIALS_INTERNAL_SERVICE_URL }}" CREDENTIALS_PUBLIC_SERVICE_URL: "{{ EDXAPP_CREDENTIALS_PUBLIC_SERVICE_URL }}" ECOMMERCE_PUBLIC_URL_ROOT: "{{ EDXAPP_ECOMMERCE_PUBLIC_URL_ROOT }}" ECOMMERCE_API_URL: "{{ EDXAPP_ECOMMERCE_API_URL }}" - JOURNALS_URL_ROOT: "{{ EDXAPP_JOURNALS_URL_ROOT 
}}" - JOURNALS_API_URL: "{{ EDXAPP_JOURNALS_API_URL }}" + BLOCKSTORE_PUBLIC_URL_ROOT: "{{ EDXAPP_BLOCKSTORE_PUBLIC_URL_ROOT }}" + BLOCKSTORE_API_URL: "{{ EDXAPP_BLOCKSTORE_API_URL }}" + LEARNER_PORTAL_URL_ROOT: "{{ EDXAPP_LEARNER_PORTAL_URL_ROOT }}" EDX_PLATFORM_REVISION: "{{ edx_platform_version }}" ENTERPRISE_API_URL: "{{ EDXAPP_ENTERPRISE_API_URL }}" COURSE_CATALOG_API_URL: "{{ EDXAPP_COURSE_CATALOG_API_URL }}" @@ -1195,12 +1313,13 @@ generic_env_config: &edxapp_generic_env FINANCIAL_REPORTS: "{{ EDXAPP_FINANCIAL_REPORTS }}" CORS_ORIGIN_WHITELIST: "{{ EDXAPP_CORS_ORIGIN_WHITELIST }}" CORS_ORIGIN_ALLOW_ALL: "{{ EDXAPP_CORS_ORIGIN_ALLOW_ALL }}" + LOGIN_REDIRECT_WHITELIST: "{{ EDXAPP_LOGIN_REDIRECT_WHITELIST }}" CROSS_DOMAIN_CSRF_COOKIE_DOMAIN: "{{ EDXAPP_CROSS_DOMAIN_CSRF_COOKIE_DOMAIN }}" CROSS_DOMAIN_CSRF_COOKIE_NAME: "{{ EDXAPP_CROSS_DOMAIN_CSRF_COOKIE_NAME }}" CSRF_COOKIE_SECURE: "{{ EDXAPP_CSRF_COOKIE_SECURE }}" + CSRF_TRUSTED_ORIGINS: "{{ EDXAPP_CSRF_TRUSTED_ORIGINS }}" VIDEO_UPLOAD_PIPELINE: "{{ EDXAPP_VIDEO_UPLOAD_PIPELINE }}" DEPRECATED_ADVANCED_COMPONENT_TYPES: "{{ EDXAPP_DEPRECATED_ADVANCED_COMPONENT_TYPES }}" - OAUTH_OIDC_ISSUER: "{{ EDXAPP_LMS_ISSUER }}" XBLOCK_FS_STORAGE_BUCKET: "{{ EDXAPP_XBLOCK_FS_STORAGE_BUCKET }}" XBLOCK_FS_STORAGE_PREFIX: "{{ EDXAPP_XBLOCK_FS_STORAGE_PREFIX }}" ANALYTICS_DASHBOARD_URL: '{{ EDXAPP_ANALYTICS_DASHBOARD_URL }}' @@ -1254,7 +1373,6 @@ generic_env_config: &edxapp_generic_env SECRET_KEY: "{{ EDXAPP_JWT_SECRET_KEY }}" JWT_EXPIRATION: '{{ EDXAPP_JWT_EXPIRATION }}' JWT_PRIVATE_SIGNING_KEY: !!null - JWT_EXPIRED_PRIVATE_SIGNING_KEYS: [] JWT_AUTH: JWT_ISSUER: "{{ EDXAPP_LMS_ISSUER }}" JWT_AUDIENCE: "{{ EDXAPP_JWT_AUDIENCE }}" @@ -1263,6 +1381,17 @@ generic_env_config: &edxapp_generic_env - ISSUER: "{{ EDXAPP_LMS_ISSUER }}" AUDIENCE: "{{ EDXAPP_JWT_AUDIENCE }}" SECRET_KEY: "{{ EDXAPP_JWT_SECRET_KEY }}" + JWT_PUBLIC_SIGNING_JWK_SET: "{{ EDXAPP_JWT_PUBLIC_SIGNING_JWK_SET|string }}" + JWT_SIGNING_ALGORITHM: "{{ 
EDXAPP_JWT_SIGNING_ALGORITHM }}" + JWT_PRIVATE_SIGNING_JWK: "{{ EDXAPP_JWT_PRIVATE_SIGNING_JWK|string }}" + JWT_AUTH_COOKIE_HEADER_PAYLOAD: "{{ EDXAPP_JWT_AUTH_COOKIE_HEADER_PAYLOAD }}" + JWT_AUTH_COOKIE_SIGNATURE: "{{ EDXAPP_JWT_AUTH_COOKIE_SIGNATURE }}" + JWT_AUTH_REFRESH_COOKIE: "{{ EDXAPP_JWT_AUTH_REFRESH_COOKIE }}" + + # edx-rbac Setting + SYSTEM_WIDE_ROLE_CLASSES: "{{ EDXAPP_SYSTEM_WIDE_ROLE_CLASSES }}" + + ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS: "{{ EDXAPP_ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS }}" #must end in slash (https://docs.djangoproject.com/en/1.4/ref/settings/#media-url) MEDIA_URL: "{{ EDXAPP_MEDIA_URL }}/" @@ -1371,6 +1500,7 @@ generic_env_config: &edxapp_generic_env HELP_TOKENS_BOOKS: "{{ EDXAPP_HELP_TOKENS_BOOKS }}" # License for serving content in China ICP_LICENSE: "{{ EDXAPP_ICP_LICENSE }}" + ICP_LICENSE_INFO: "{{ EDXAPP_ICP_LICENSE_INFO }}" # Base Cookie Domain to share cookie across edx domains BASE_COOKIE_DOMAIN: "{{ EDXAPP_BASE_COOKIE_DOMAIN }}" @@ -1383,13 +1513,15 @@ generic_env_config: &edxapp_generic_env RETIRED_EMAIL_PREFIX: "{{ EDXAPP_RETIRED_EMAIL_PREFIX }}" RETIRED_EMAIL_DOMAIN: "{{ EDXAPP_RETIRED_EMAIL_DOMAIN }}" RETIRED_USER_SALTS: "{{ EDXAPP_RETIRED_USER_SALTS }}" - RETIREMENT_SERVICE_WORKER_USERNAME: "{{ EDXAPP_RETIREMENT_SERVICE_WORKER_USERNAME }}" + RETIREMENT_SERVICE_WORKER_USERNAME: "{{ EDXAPP_RETIREMENT_SERVICE_USER_NAME }}" RETIREMENT_STATES: "{{ EDXAPP_RETIREMENT_STATES }}" - PASSWORD_MIN_LENGTH: "{{ EDXAPP_PASSWORD_MIN_LENGTH }}" - PASSWORD_MAX_LENGTH: "{{ EDXAPP_PASSWORD_MAX_LENGTH }}" - PASSWORD_COMPLEXITY: "{{ EDXAPP_PASSWORD_COMPLEXITY }}" + USERNAME_REPLACEMENT_WORKER: "{{ EDXAPP_USERNAME_REPLACEMENT_WORKER }}" + + AUTH_PASSWORD_VALIDATORS: "{{ EDXAPP_AUTH_PASSWORD_VALIDATORS }}" PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG: "{{ EDXAPP_PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG }}" + # Dashboard courses default limit + DASHBOARD_COURSE_LIMIT: "{{ EDXAPP_DASHBOARD_COURSE_LIMIT }}" lms_auth_config: <<: 
*edxapp_generic_auth @@ -1429,11 +1561,9 @@ lms_auth_config: default_class: 'xmodule.hidden_module.HiddenDescriptor' fs_root: "{{ edxapp_course_data_dir }}" render_template: 'edxmako.shortcuts.render_to_string' - PROCTORING_BACKEND_PROVIDER: "{{ EDXAPP_PROCTORING_BACKEND_PROVIDER }}" SOCIAL_AUTH_OAUTH_SECRETS: "{{ EDXAPP_SOCIAL_AUTH_OAUTH_SECRETS }}" ACE_CHANNEL_SAILTHRU_API_KEY: "{{ EDXAPP_ACE_CHANNEL_SAILTHRU_API_KEY }}" ACE_CHANNEL_SAILTHRU_API_SECRET: "{{ EDXAPP_ACE_CHANNEL_SAILTHRU_API_SECRET }}" - ENTERPRISE_REPORTING_SECRET: "{{ EDXAPP_ENTERPRISE_REPORTING_SECRET }}" ANALYTICS_API_KEY: "{{ EDXAPP_LMS_ANALYTICS_API_KEY }}" FERNET_KEYS: "{{ EDXAPP_FERNET_KEYS }}" @@ -1447,6 +1577,7 @@ lms_env_config: PAID_COURSE_REGISTRATION_CURRENCY: "{{ EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY }}" GIT_REPO_DIR: "{{ EDXAPP_GIT_REPO_DIR }}" SITE_NAME: "{{ EDXAPP_LMS_SITE_NAME }}" + HTTPS: "{{ EDXAPP_LMS_HTTPS }}" VIDEO_CDN_URL: "{{ EDXAPP_VIDEO_CDN_URLS }}" PDF_RECEIPT_TAX_ID: "{{ EDXAPP_PDF_RECEIPT_TAX_ID }}" PDF_RECEIPT_FOOTER_TEXT: "{{ EDXAPP_PDF_RECEIPT_FOOTER_TEXT }}" @@ -1477,6 +1608,8 @@ lms_env_config: CELERY_QUEUES: "{{ EDXAPP_LMS_CELERY_QUEUES }}" ALTERNATE_WORKER_QUEUES: "cms" ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES: "{{ EDXAPP_ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES }}" + ENTERPRISE_CUSTOMER_SUCCESS_EMAIL: "{{ EDXAPP_ENTERPRISE_CUSTOMER_SUCCESS_EMAIL }}" + ENTERPRISE_INTEGRATIONS_EMAIL: "{{ EDXAPP_ENTERPRISE_INTEGRATIONS_EMAIL }}" ENTERPRISE_ENROLLMENT_API_URL: "{{ EDXAPP_ENTERPRISE_ENROLLMENT_API_URL }}" ENTERPRISE_SUPPORT_URL: "{{ EDXAPP_ENTERPRISE_SUPPORT_URL }}" PARENTAL_CONSENT_AGE_LIMIT: "{{ EDXAPP_PARENTAL_CONSENT_AGE_LIMIT }}" @@ -1491,6 +1624,14 @@ lms_env_config: ANALYTICS_API_URL: "{{ EDXAPP_LMS_ANALYTICS_API_URL }}" GOOGLE_SITE_VERIFICATION_ID: "{{ EDXAPP_GOOGLE_SITE_VERIFICATION_ID }}" STATIC_URL_BASE: "{{ EDXAPP_LMS_STATIC_URL_BASE }}" + X_FRAME_OPTIONS: "{{ EDXAPP_X_FRAME_OPTIONS }}" + WRITABLE_GRADEBOOK_URL: "{{ EDXAPP_LMS_WRITABLE_GRADEBOOK_URL 
}}" + PROFILE_MICROFRONTEND_URL: "{{ EDXAPP_PROFILE_MICROFRONTEND_URL }}" + ORDER_HISTORY_MICROFRONTEND_URL: "{{ EDXAPP_ORDER_HISTORY_MICROFRONTEND_URL }}" + PROGRAM_CERTIFICATES_ROUTING_KEY: "{{ EDXAPP_PROGRAM_CERTIFICATES_ROUTING_KEY }}" + ACCOUNT_MICROFRONTEND_URL: "{{ EDXAPP_ACCOUNT_MICROFRONTEND_URL }}" + PROGRAM_MANAGER_MICROFRONTEND_URL: "{{ EDXAPP_PROGRAM_MANAGER_MICROFRONTEND_URL}}" + LEARNING_MICROFRONTEND_URL: "{{ EDXAPP_LEARNING_MICROFRONTEND_URL}}" cms_auth_config: <<: *edxapp_generic_auth @@ -1499,7 +1640,6 @@ cms_auth_config: ADDITIONAL_OPTIONS: "{{ EDXAPP_CONTENTSTORE_ADDITIONAL_OPTS }}" DOC_STORE_CONFIG: "{{ EDXAPP_CMS_DOC_STORE_CONFIG }}" DOC_STORE_CONFIG: "{{ EDXAPP_CMS_DOC_STORE_CONFIG }}" - ENTERPRISE_REPORTING_SECRET: "{{ EDXAPP_ENTERPRISE_REPORTING_SECRET }}" MODULESTORE: default: ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore' @@ -1522,6 +1662,7 @@ cms_auth_config: render_template: 'edxmako.shortcuts.render_to_string' SEGMENT_KEY: "{{ EDXAPP_CMS_SEGMENT_KEY }}" PARSE_KEYS: "{{ EDXAPP_PARSE_KEYS }}" + FERNET_KEYS: "{{ EDXAPP_FERNET_KEYS }}" cms_env_config: <<: *edxapp_generic_env @@ -1531,6 +1672,7 @@ cms_env_config: ALTERNATE_WORKER_QUEUES: "lms" COURSE_IMPORT_EXPORT_BUCKET: "{{ EDXAPP_IMPORT_EXPORT_BUCKET }}" STATIC_URL_BASE: "{{ EDXAPP_CMS_STATIC_URL_BASE }}" + X_FRAME_OPTIONS: "{{ EDXAPP_X_FRAME_OPTIONS }}" # install dir for the edx-platform repo edxapp_code_dir: "{{ edxapp_app_dir }}/edx-platform" @@ -1564,19 +1706,21 @@ worker_core_mult: # TODO: change variables to ALL-CAPS, since they are meant to be externally overridden edxapp_theme_name: "" edxapp_theme_source_repo: 'https://{{ COMMON_GIT_MIRROR }}/Stanford-Online/edx-theme.git' -edxapp_theme_version: 'master' +EDXAPP_THEME_VERSION: 'master' +edxapp_theme_version: "{{ EDXAPP_THEME_VERSION }}" # make this the public URL instead of writable edx_platform_repo: "https://{{ COMMON_GIT_MIRROR }}/edx/edx-platform.git" -# `edx_platform_version` can be anything that git recognizes as a 
commit +# `EDX_PLATFORM_VERSION` can be anything that git recognizes as a commit # reference, including a tag, a branch name, or a commit hash -edx_platform_version: 'release' +EDX_PLATFORM_VERSION: 'release' +edx_platform_version: "{{ EDX_PLATFORM_VERSION }}" custom_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/custom.txt" base_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/base.txt" django_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/django.txt" openstack_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/openstack.txt" -sandbox_base_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/base.txt" +sandbox_base_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/{% if edxapp_sandbox_python_version == 'python2.7' %}base.txt{% else %}py35.txt{% endif %}" # The Python requirements files in the order they should be installed. This order should # match the order of PYTHON_REQ_FILES in edx-platform/pavelib/prereqs.py. @@ -1592,8 +1736,6 @@ edxapp_requirements_with_github_urls: - "{{ base_requirements_file }}" - "{{ sandbox_base_requirements }}" -edxapp_chrislea_ppa: "ppa:chris-lea/node.js" - edxapp_debian_pkgs_default: # for compiling the virtualenv # (only needed if wheel files aren't available) @@ -1608,13 +1750,13 @@ edxapp_debian_pkgs_default: # misc - curl - ipython - - nodejs - ntp # matplotlib needs libfreetype6-dev - libfreetype6-dev # cffi needs libffi-dev - libffi-dev - python-dev + - python3-dev - libsqlite3-dev edxapp_debian_pkgs_extra: [] @@ -1625,6 +1767,7 @@ edxapp_cms_variant: cms # Worker Settings worker_django_settings_module: '{{ EDXAPP_SETTINGS }}' +EDXAPP_CELERY_HEARTBEAT_ENABLED: true # Add default service worker users SERVICE_WORKER_USERS: @@ -1653,8 +1796,26 @@ SERVICE_WORKER_USERS: username: "{{ CREDENTIALS_SERVICE_USER_NAME }}" is_staff: true is_superuser: false + - email: "{{ DESIGNER_SERVICE_USER_EMAIL }}" + username: "{{ DESIGNER_SERVICE_USER_NAME }}" + is_staff: true + 
is_superuser: false + - email: "{{ REGISTRAR_SERVICE_USER_EMAIL }}" + username: "{{ REGISTRAR_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + - email: "{{ EDXAPP_RETIREMENT_SERVICE_USER_EMAIL }}" + username: "{{ EDXAPP_RETIREMENT_SERVICE_USER_NAME }}" + is_staff: true + is_superuser: false + enabled: "{{ COMMON_RETIREMENT_SERVICE_SETUP | default(false) }}" EDXAPP_ENABLE_DJANGO_ADMIN_RESTRICTION: false +EDXAPP_LMS_LOCAL_CONFIG_FILE: "{{ UNENCRYPTED_CFG_DIR }}/lms.yml" +EDXAPP_CMS_LOCAL_CONFIG_FILE: "{{ UNENCRYPTED_CFG_DIR }}/studio.yml" + +edxapp_staticfiles_storage_overrides: !!null + # for docker devstack private_requirements_file: "{{ edxapp_app_dir }}/customer_private_requirements.txt" diff --git a/playbooks/roles/edxapp/meta/main.yml b/playbooks/roles/edxapp/meta/main.yml index 661bbb431d5..23d21a6f1a0 100644 --- a/playbooks/roles/edxapp/meta/main.yml +++ b/playbooks/roles/edxapp/meta/main.yml @@ -1,7 +1,13 @@ --- dependencies: - common - - supervisor + - role: supervisor + supervisor_spec: + - service: edxapp + migration_check_services: "lms,cms,workers" + python: python.edxapp + code: "{{ edxapp_code_dir | default(None) }}" + env: "{{ edxapp_app_dir | default(None) }}/edxapp_env" - edxapp_common - nltk - role: edx_themes diff --git a/playbooks/roles/edxapp/tasks/deploy.yml b/playbooks/roles/edxapp/tasks/deploy.yml index 7177582c3ef..e3217df60f4 100644 --- a/playbooks/roles/edxapp/tasks/deploy.yml +++ b/playbooks/roles/edxapp/tasks/deploy.yml @@ -45,9 +45,8 @@ repo: "{{ edx_platform_repo }}" version: "{{ edx_platform_version }}" accept_hostkey: yes + key_file: "{% if EDXAPP_USE_GIT_IDENTITY %}{{ edxapp_git_identity }}{% endif %}" become_user: "{{ edxapp_user }}" - environment: - GIT_SSH: "{{ edxapp_git_ssh }}" register: edxapp_platform_checkout tags: - install @@ -74,10 +73,9 @@ repo: "{{ edxapp_theme_source_repo }}" version: "{{ edxapp_theme_version }}" accept_hostkey: yes - when: edxapp_theme_name and not EDXAPP_ENABLE_COMPREHENSIVE_THEMING + 
key_file: "{% if EDXAPP_USE_GIT_IDENTITY %}{{ edxapp_git_identity }}{% endif %}" + when: edxapp_theme_name != '' become_user: "{{ edxapp_user }}" - environment: - GIT_SSH: "{{ edxapp_git_ssh }}" register: edxapp_theme_checkout tags: - install @@ -121,7 +119,7 @@ - install:app-requirements - name: Create the virtualenv to install the Python requirements - command: "virtualenv {{ edxapp_venv_dir }}" + command: "virtualenv {{ edxapp_venv_dir }} -p {{ EDXAPP_PYTHON_VERSION }}" args: chdir: "{{ edxapp_code_dir }}" creates: "{{ edxapp_venv_dir }}/bin/pip" @@ -246,12 +244,6 @@ - install - install:app-requirements -- name: create nodeenv - shell: "{{ edxapp_venv_dir }}/bin/nodeenv {{ edxapp_nodeenv_dir }} --node={{ edxapp_node_version }} --prebuilt --force" - tags: - - install - - install:system-requirements - # Set the npm registry # This needs to be done as root since npm is weird about # chown - https://github.com/npm/npm/issues/3565 @@ -276,7 +268,7 @@ #install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json - name: install node dependencies - shell: "{{ edxapp_nodeenv_bin }}/npm install" + shell: "npm install" args: chdir: "{{ edxapp_code_dir }}" environment: "{{ edxapp_environment }}" @@ -298,7 +290,7 @@ - install:app-requirements - name: Create the virtualenv to install the Python sandbox requirements - command: "virtualenv {{ edxapp_sandbox_venv_dir }} -p python2.7" + command: "virtualenv {{ edxapp_sandbox_venv_dir }} -p {{ edxapp_sandbox_python_version }}" args: chdir: "{{ edxapp_code_dir }}" creates: "{{ edxapp_sandbox_venv_dir }}/bin/pip" @@ -330,6 +322,7 @@ virtualenv: "{{ edxapp_sandbox_venv_dir }}" state: present extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w" + virtualenv_python: "{{ edxapp_sandbox_python_version }}" become_user: "{{ edxapp_sandbox_user }}" when: EDXAPP_PYTHON_SANDBOX tags: @@ -345,6 +338,24 @@ - install - install:app-requirements +- name: code sandbox | test enforcement 
1 + command: "{{ edxapp_sandbox_venv_dir }}/bin/python -c \"import os; os.listdir('/')\"" + register: sandbox_test1 + failed_when: "'PermissionError:' not in sandbox_test1.stderr" + when: EDXAPP_SANDBOX_ENFORCE + +- name: code sandbox | test enforcement 2 + command: "{{ edxapp_sandbox_venv_dir }}/bin/python -c \"import subprocess; subprocess.check_output('ls', shell=True)\"" + register: sandbox_test2 + failed_when: "'PermissionError:' not in sandbox_test2.stderr" + when: EDXAPP_SANDBOX_ENFORCE + +- name: code sandbox | test enforcement 3 + command: "{{ edxapp_sandbox_venv_dir }}/bin/python -c \"print(7*11*13*17)\"" + register: sandbox_test3 + failed_when: "'17017' not in sandbox_test3.stdout" + when: EDXAPP_SANDBOX_ENFORCE + - name: compiling all py files in the edx-platform repo shell: "{{ edxapp_venv_bin }}/python -m compileall -q -x '.git/.*|node_modules/.*' {{ edxapp_code_dir }}" become_user: "{{ edxapp_user }}" @@ -377,6 +388,8 @@ - "lms.sh" - "cms.sh" - "worker.sh" + - "reload_lms_config.sh" + - "reload_cms_config.sh" tags: - install - install:configuration @@ -573,3 +586,17 @@ minute: "{{ EDXAPP_CLEARSESSIONS_CRON_MINUTES }}" day: "*" when: EDXAPP_CLEARSESSIONS_CRON_ENABLED + +- name: install additional cron jobs + cron: "{{ item }}" + with_items: "{{ EDXAPP_ADDITIONAL_CRON_JOBS }}" + +- name: populate retirement states + shell: ". 
{{ edxapp_app_dir }}/edxapp_env && {{ edxapp_venv_bin }}/python ./manage.py lms --settings={{ edxapp_settings }} populate_retirement_states" + args: + chdir: "{{ edxapp_code_dir }}" + become_user: "{{ common_web_user }}" + when: COMMON_RETIREMENT_SERVICE_SETUP | default(false) + tags: + - manage + - manage:db diff --git a/playbooks/roles/edxapp/tasks/main.yml b/playbooks/roles/edxapp/tasks/main.yml index b08ec891127..d10fe30e412 100644 --- a/playbooks/roles/edxapp/tasks/main.yml +++ b/playbooks/roles/edxapp/tasks/main.yml @@ -6,8 +6,8 @@ user: name: "{{ edxapp_user }}" home: "{{ edxapp_app_dir }}" - createhome: no - shell: /bin/false + createhome: "{{ edxapp_user_createhome }}" + shell: "{{ edxapp_user_shell }}" tags: - install - install:base @@ -48,6 +48,15 @@ - devstack - devstack:install +- name: add paver autocomplete to bashrc + lineinfile: + dest: /root/.bashrc + line: "source {{ edxapp_code_dir }}/scripts/paver_autocomplete.sh" + when: devstack is defined and devstack + tags: + - devstack + - devstack:install + - name: create edxapp log dir file: path: "{{ edxapp_log_dir }}" @@ -106,6 +115,31 @@ - install - install:base +- name: Install the gpg key for nodejs LTS + apt_key: + url: "https://deb.nodesource.com/gpgkey/nodesource.gpg.key" + state: present + tags: + - install + - install:base + +- name: Install the nodejs LTS repos + apt_repository: + repo: "deb https://deb.nodesource.com/node_{{ edxapp_node_version }}.x {{ ansible_distribution_release }} main" + state: present + update_cache: yes + tags: + - install + - install:base + +- name: Install node + apt: + name: nodejs + state: present + tags: + - install + - install:base + - name: set up edxapp .npmrc template: src: .npmrc.j2 @@ -138,3 +172,15 @@ - include: deploy.yml tags: - deploy + +- name: Include JWT signature setting in the app config file + include_role: + name: jwt_signature + when: CONFIGURE_JWTS and celery_worker is not defined + vars: + app_name: 'lms' + app_config_file: "{{ COMMON_CFG_DIR 
}}/lms.yml" + app_config_owner: "{{ edxapp_user }}" + app_config_group: root + app_config_mode: 0644 + CAN_GENERATE_NEW_JWT_SIGNATURE: True diff --git a/playbooks/roles/edxapp/tasks/python_sandbox_env.yml b/playbooks/roles/edxapp/tasks/python_sandbox_env.yml index 889b908f81d..0315c50e418 100644 --- a/playbooks/roles/edxapp/tasks/python_sandbox_env.yml +++ b/playbooks/roles/edxapp/tasks/python_sandbox_env.yml @@ -43,6 +43,7 @@ home: "{{ edxapp_sandbox_venv_dir }}" tags: - edxapp-sandbox + - devstack - name: code sandbox | Install apparmor utils system pkg apt: diff --git a/playbooks/roles/edxapp/tasks/service_variant_config.yml b/playbooks/roles/edxapp/tasks/service_variant_config.yml index e3ffc0e4a41..3dedc77145d 100644 --- a/playbooks/roles/edxapp/tasks/service_variant_config.yml +++ b/playbooks/roles/edxapp/tasks/service_variant_config.yml @@ -50,6 +50,156 @@ - install:app-configuration - edxapp_cfg # Old deprecated tag, will remove when possible +- name: combined lms auth env for yml + set_fact: + lms_combined_config: '{{lms_env_config|combine(lms_auth_config)}}' + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + when: '"lms" in service_variants_enabled and not EDXAPP_DECRYPT_CONFIG_ENABLED' + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + - edxapp_cfg_yaml_only # Used to render the yaml without the json until we remove the json configs + +- name: render lms yml config # meant to replace existing json config eventually + template: + src: "lms.yml.j2" + dest: "{{ COMMON_CFG_DIR }}/lms.yml" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: 0640 + become: true + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + with_items: "{{ service_variants_enabled }}" + when: '"lms" in service_variants_enabled and not EDXAPP_DECRYPT_CONFIG_ENABLED' + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when 
possible + - edxapp_cfg_yaml_only # Used to render the yaml without the json until we remove the json configs + +- name: combined cms auth env for yml + set_fact: + cms_combined_config: '{{cms_env_config|combine(cms_auth_config)}}' + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + when: '"cms" in service_variants_enabled and not EDXAPP_DECRYPT_CONFIG_ENABLED' + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + - edxapp_cfg_yaml_only # Used to render the yaml without the json until we remove the json configs + +- name: render studio yml config # meant to replace existing json config eventually + template: + src: "studio.yml.j2" + dest: "{{ COMMON_CFG_DIR }}/studio.yml" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: 0640 + become: true + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + with_items: "{{ service_variants_enabled }}" + when: '"cms" in service_variants_enabled and not EDXAPP_DECRYPT_CONFIG_ENABLED' + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + - edxapp_cfg_yaml_only # Used to render the yaml without the json until we remove the json configs + +- name: Decrypt lms config + local_action: command asym_crypto_yaml decrypt-encrypted-yaml --secrets_file_path {{ ENCRYPTED_CFG_DIR }}/lms.yml --private_key_path {{ DECRYPT_CONFIG_PRIVATE_KEY }} --outfile_path {{ UNENCRYPTED_CFG_DIR }}/lms.yml + become: false + with_items: "{{ service_variants_enabled }}" + when: '"lms" in service_variants_enabled and EDXAPP_DECRYPT_CONFIG_ENABLED' + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + - edxapp_cfg_yaml_only + +- name: Decrypt cms config + local_action: command asym_crypto_yaml decrypt-encrypted-yaml --secrets_file_path {{ ENCRYPTED_CFG_DIR 
}}/studio.yml --private_key_path {{ DECRYPT_CONFIG_PRIVATE_KEY }} --outfile_path {{ UNENCRYPTED_CFG_DIR }}/studio.yml + become: false + with_items: "{{ service_variants_enabled }}" + when: '"cms" in service_variants_enabled and EDXAPP_DECRYPT_CONFIG_ENABLED' + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + +- name: Replace deploy host to sandbox dns name + replace: + path: "{{ UNENCRYPTED_CFG_DIR }}/{{ item }}.yml" + regexp: 'deploy_host' + replace: "{{ COMMON_DEPLOY_HOSTNAME }}" + with_items: ['lms','studio'] + when: EDXAPP_DECRYPT_CONFIG_ENABLED and SANDBOX_CONFIG + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + become: false + delegate_to: localhost + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + +- name: Copy lms config file + copy: + src: "{{ EDXAPP_LMS_LOCAL_CONFIG_FILE }}" + dest: "{{ COMMON_CFG_DIR }}/lms.yml" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: 0640 + become: true + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + with_items: "{{ service_variants_enabled }}" + when: '"lms" in service_variants_enabled and EDXAPP_COPY_CONFIG_ENABLED' + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + +- name: Copy cms config file + copy: + src: "{{ EDXAPP_CMS_LOCAL_CONFIG_FILE }}" + dest: "{{ COMMON_CFG_DIR }}/studio.yml" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: 0640 + become: true + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + with_items: "{{ service_variants_enabled }}" + when: '"cms" in service_variants_enabled and EDXAPP_COPY_CONFIG_ENABLED' + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg # Old deprecated tag, will remove when possible + +- name: Write the 
revisions config file + template: + src: "revisions.yml.j2" + dest: "{{ edxapp_revision_cfg }}" + owner: "{{ edxapp_user }}" + group: "{{ common_web_group }}" + mode: 0640 + become: true + no_log: "{{ COMMON_CONFIG_NO_LOGGING }}" + tags: + - install + - install:configuration + - install:app-configuration + - edxapp_cfg_yaml_only # Used to render the yaml without the json until we remove the json configs + # write the supervisor scripts for the service variants - name: "writing {{ item }} supervisor script" template: diff --git a/playbooks/roles/edxapp/tasks/tag_ec2.yml b/playbooks/roles/edxapp/tasks/tag_ec2.yml index 4756ab49491..5d3b22a82ee 100644 --- a/playbooks/roles/edxapp/tasks/tag_ec2.yml +++ b/playbooks/roles/edxapp/tasks/tag_ec2.yml @@ -1,6 +1,6 @@ --- - name: get instance information - action: ec2_facts + action: ec2_metadata_facts - name: tag instance with edx_platform version ec2_tag: diff --git a/playbooks/roles/edxapp/templates/cms.auth.json.j2 b/playbooks/roles/edxapp/templates/cms.auth.json.j2 index 05378fbd711..e0f1351d395 100644 --- a/playbooks/roles/edxapp/templates/cms.auth.json.j2 +++ b/playbooks/roles/edxapp/templates/cms.auth.json.j2 @@ -1,8 +1,2 @@ {% do cms_auth_config.update(EDXAPP_CMS_AUTH_EXTRA) %} -{% for key, value in cms_auth_config.iteritems() -%} - {% if value == 'None' -%} - {% do cms_auth_config.update({key: None }) %} - {%- endif %} -{%- endfor %} - {{ cms_auth_config | to_nice_json }} diff --git a/playbooks/roles/edxapp/templates/cms.env.json.j2 b/playbooks/roles/edxapp/templates/cms.env.json.j2 index 1c0ef52aef0..3522b735946 100644 --- a/playbooks/roles/edxapp/templates/cms.env.json.j2 +++ b/playbooks/roles/edxapp/templates/cms.env.json.j2 @@ -1,7 +1,2 @@ {% do cms_env_config.update(EDXAPP_CMS_ENV_EXTRA) %} -{% for key, value in cms_env_config.iteritems() -%} - {% if value == 'None' -%} - {% do cms_env_config.update({key: None }) %} - {%- endif %} -{%- endfor %} {{ cms_env_config | to_nice_json }} diff --git 
a/playbooks/roles/edxapp/templates/cms_gunicorn.py.j2 b/playbooks/roles/edxapp/templates/cms_gunicorn.py.j2 index 1e6ec25dc23..ec2664c622b 100644 --- a/playbooks/roles/edxapp/templates/cms_gunicorn.py.j2 +++ b/playbooks/roles/edxapp/templates/cms_gunicorn.py.j2 @@ -1,5 +1,5 @@ """ -gunicorn configuration file: http://docs.gunicorn.org/en/develop/configure.html +gunicorn configuration file: http://docs.gunicorn.org/en/stable/configure.html {{ ansible_managed }} """ @@ -22,6 +22,8 @@ workers = {{ EDXAPP_WORKERS.cms }} workers = (multiprocessing.cpu_count()-1) * {{ worker_core_mult.cms }} + {{ worker_core_mult.cms }} {% endif %} +{{ common_pre_request }} + {{ common_close_all_caches }} def post_fork(server, worker): diff --git a/playbooks/roles/edxapp/templates/code.sandbox.j2 b/playbooks/roles/edxapp/templates/code.sandbox.j2 index 8a8f5278fc1..911af26174f 100644 --- a/playbooks/roles/edxapp/templates/code.sandbox.j2 +++ b/playbooks/roles/edxapp/templates/code.sandbox.j2 @@ -1,6 +1,6 @@ #include -{{ edxapp_sandbox_venv_dir }}/bin/python { +{{ edxapp_sandbox_venv_dir }}/bin/* { #include {{ edxapp_sandbox_venv_dir }}/** mr, @@ -12,15 +12,15 @@ # Whitelist particular shared objects from the system # python installation # - /usr/lib/python2.7/lib-dynload/_json.so mr, - /usr/lib/python2.7/lib-dynload/_ctypes.so mr, - /usr/lib/python2.7/lib-dynload/_heapq.so mr, - /usr/lib/python2.7/lib-dynload/_io.so mr, - /usr/lib/python2.7/lib-dynload/_csv.so mr, - /usr/lib/python2.7/lib-dynload/datetime.so mr, - /usr/lib/python2.7/lib-dynload/_elementtree.so mr, - /usr/lib/python2.7/lib-dynload/pyexpat.so mr, - /usr/lib/python2.7/lib-dynload/future_builtins.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/_json.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/_ctypes.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/_heapq.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/_io.so mr, + /usr/lib/{{ 
edxapp_sandbox_python_version }}/lib-dynload/_csv.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/datetime.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/_elementtree.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/pyexpat.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/future_builtins.so mr, # Matplot lib needs a place for temp caches {{ edxapp_sandbox_venv_dir }}/.config/ wrix, @@ -29,8 +29,8 @@ {{ edxapp_sandbox_venv_dir }}/.cache/** wrix, # Matplotlib related libraries - /usr/lib/python2.7/lib-dynload/termios.so mr, - /usr/lib/python2.7/lib-dynload/parser.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/termios.so mr, + /usr/lib/{{ edxapp_sandbox_python_version }}/lib-dynload/parser.so mr, # Matplot lib needs fonts to make graphs /usr/share/fonts/ r, @@ -42,5 +42,5 @@ # Allow access to selections from /proc # /proc/*/mounts r, - + } diff --git a/playbooks/roles/edxapp/templates/devstack.sh.j2 b/playbooks/roles/edxapp/templates/devstack.sh.j2 index 532aa2916f6..8e0cffbf9b8 100644 --- a/playbooks/roles/edxapp/templates/devstack.sh.j2 +++ b/playbooks/roles/edxapp/templates/devstack.sh.j2 @@ -6,11 +6,24 @@ source {{ edxapp_app_dir }}/edxapp_env COMMAND=$1 case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; open) - . {{ edxapp_nodeenv_bin }}/activate . {{ edxapp_venv_bin }}/activate cd {{ edxapp_code_dir }} /bin/bash ;; + exec) + shift + + . 
{{ edxapp_venv_bin }}/activate + cd {{ edxapp_code_dir }} + + "$@" + ;; + *) + "$@" + ;; esac diff --git a/playbooks/roles/edxapp/templates/edx/app/edxapp/cms.sh.j2 b/playbooks/roles/edxapp/templates/edx/app/edxapp/cms.sh.j2 index 21c564a95df..7b4c8cbd1dd 100644 --- a/playbooks/roles/edxapp/templates/edx/app/edxapp/cms.sh.j2 +++ b/playbooks/roles/edxapp/templates/edx/app/edxapp/cms.sh.j2 @@ -7,6 +7,7 @@ {% if COMMON_ENABLE_NEWRELIC_APP %} {% set executable = edxapp_venv_bin + '/newrelic-admin run-program ' + edxapp_venv_bin + '/gunicorn' %} +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED="{{ EDXAPP_CMS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}" export NEW_RELIC_APP_NAME="{{ EDXAPP_NEWRELIC_CMS_APPNAME }}" export NEW_RELIC_CONFIG_FILE="{{ edxapp_app_dir }}/newrelic.ini" if command -v ec2metadata >/dev/null 2>&1; then @@ -27,6 +28,8 @@ export DJANGO_SETTINGS_MODULE="{{ EDXAPP_CMS_ENV }}" export SERVICE_VARIANT="cms" export PATH="{{ edxapp_deploy_path }}" export BOTO_CONFIG="{{ edxapp_app_dir }}/.boto" +export EDX_REST_API_CLIENT_NAME="{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-studio" source {{ edxapp_app_dir }}/edxapp_env -{{ executable }} -c {{ edxapp_app_dir }}/cms_gunicorn.py {{ EDXAPP_CMS_GUNICORN_EXTRA }} cms.wsgi +# We exec so that gunicorn is the child of supervisor and can be managed properly +exec {{ executable }} -c {{ edxapp_app_dir }}/cms_gunicorn.py {{ EDXAPP_CMS_GUNICORN_EXTRA }} cms.wsgi diff --git a/playbooks/roles/edxapp/templates/edx/app/edxapp/lms.sh.j2 b/playbooks/roles/edxapp/templates/edx/app/edxapp/lms.sh.j2 index f3857226679..8cf6e703f3f 100644 --- a/playbooks/roles/edxapp/templates/edx/app/edxapp/lms.sh.j2 +++ b/playbooks/roles/edxapp/templates/edx/app/edxapp/lms.sh.j2 @@ -7,6 +7,7 @@ {% if COMMON_ENABLE_NEWRELIC_APP %} {% set executable = edxapp_venv_bin + '/newrelic-admin run-program ' + edxapp_venv_bin + '/gunicorn' %} +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED="{{ EDXAPP_LMS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}" export 
NEW_RELIC_APP_NAME="{{ EDXAPP_NEWRELIC_LMS_APPNAME }}" export NEW_RELIC_CONFIG_FILE="{{ edxapp_app_dir }}/newrelic.ini" if command -v ec2metadata >/dev/null 2>&1; then @@ -28,6 +29,8 @@ export DJANGO_SETTINGS_MODULE="{{ EDXAPP_LMS_ENV }}" export SERVICE_VARIANT="lms" export PATH="{{ edxapp_deploy_path }}" export BOTO_CONFIG="{{ edxapp_app_dir }}/.boto" +export EDX_REST_API_CLIENT_NAME="{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-lms" source {{ edxapp_app_dir }}/edxapp_env -{{ executable }} -c {{ edxapp_app_dir }}/lms_gunicorn.py lms.wsgi +# We exec so that gunicorn is the child of supervisor and can be managed properly +exec {{ executable }} -c {{ edxapp_app_dir }}/lms_gunicorn.py lms.wsgi diff --git a/playbooks/roles/edxapp/templates/edx/app/edxapp/reload_cms_config.sh.j2 b/playbooks/roles/edxapp/templates/edx/app/edxapp/reload_cms_config.sh.j2 new file mode 100644 index 00000000000..462e66e2804 --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/app/edxapp/reload_cms_config.sh.j2 @@ -0,0 +1,13 @@ +#jinja2:trim_blocks: False +{# Have to disable trim blocks or else fi at end of file is not on it's own line #} +#!/bin/bash + +# Reload Studio gunicorn if this machine has the Studio frontend +if [ -e '/edx/app/supervisor/conf.d/cms.conf' ]; then + /edx/bin/supervisorctl signal HUP cms +fi + +# Reload Studio workers if this machine has workers +if [ -e '/edx/app/supervisor/conf.d/workers.conf' ]; then + /edx/bin/supervisorctl signal HUP {% for w in edxapp_workers %}{% if w.service_variant == 'cms' %}edxapp_worker:{{ w.service_variant }}_{{ w.queue }}_{{ w.concurrency }}{% if not loop.last %} {% endif %}{% endif %}{% endfor %} +fi diff --git a/playbooks/roles/edxapp/templates/edx/app/edxapp/reload_lms_config.sh.j2 b/playbooks/roles/edxapp/templates/edx/app/edxapp/reload_lms_config.sh.j2 new file mode 100644 index 00000000000..f2cee821358 --- /dev/null +++ b/playbooks/roles/edxapp/templates/edx/app/edxapp/reload_lms_config.sh.j2 @@ -0,0 +1,13 @@ 
+#jinja2:trim_blocks: False +{# Have to disable trim blocks or else fi at end of file is not on it's own line #} +#!/bin/bash + +# Reload LMS gunicorn if this machine has the LMS frontend +if [ -e '/edx/app/supervisor/conf.d/lms.conf' ]; then + /edx/bin/supervisorctl signal HUP lms +fi + +# Reload LMS workers if this machine has workers +if [ -e '/edx/app/supervisor/conf.d/workers.conf' ]; then + /edx/bin/supervisorctl signal HUP {% for w in edxapp_workers %}{% if w.service_variant == 'lms' %}edxapp_worker:{{ w.service_variant }}_{{ w.queue }}_{{ w.concurrency }}{% if not loop.last %} {% endif %}{% endif %}{% endfor %} +fi diff --git a/playbooks/roles/edxapp/templates/edx/app/edxapp/worker.sh.j2 b/playbooks/roles/edxapp/templates/edx/app/edxapp/worker.sh.j2 index a5ed81da837..75e3c116f98 100644 --- a/playbooks/roles/edxapp/templates/edx/app/edxapp/worker.sh.j2 +++ b/playbooks/roles/edxapp/templates/edx/app/edxapp/worker.sh.j2 @@ -3,8 +3,10 @@ # {{ ansible_managed }} {% set edxapp_venv_bin = edxapp_venv_dir + "/bin" %} +source {{ edxapp_app_dir }}/edxapp_env {% if COMMON_ENABLE_NEWRELIC_APP %} -{% set executable = edxapp_venv_bin + '/newrelic-admin run-program ' + edxapp_venv_bin + '/python' %} +{% set executable = edxapp_venv_bin + '/newrelic-admin run-program ' + edxapp_venv_bin + '/celery worker' %} + export NEW_RELIC_CONFIG_FILE="{{ edxapp_app_dir }}/newrelic.ini" if command -v ec2metadata >/dev/null 2>&1; then INSTANCEID=$(ec2metadata --instance-id); @@ -12,8 +14,9 @@ if command -v ec2metadata >/dev/null 2>&1; then export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID" fi {% else %} -{% set executable = edxapp_venv_bin + '/python' %} +{% set executable = edxapp_venv_bin + '/celery worker' %} {% endif %} # We exec so that celery is the child of supervisor and can be managed properly -exec {{ executable }} {{ edxapp_code_dir }}/manage.py $@ + +exec {{ executable }} $@ diff --git 
a/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/mongod.conf.j2 b/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/mongod.conf.j2 deleted file mode 100644 index b522d3bd948..00000000000 --- a/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/mongod.conf.j2 +++ /dev/null @@ -1,5 +0,0 @@ -[program:mongod] -command=mongod --smallfiles --nojournal --storageEngine wiredTiger -stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log -autorestart=true diff --git a/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/sshd.conf.j2 b/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/sshd.conf.j2 deleted file mode 100644 index b00df69c41e..00000000000 --- a/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/sshd.conf.j2 +++ /dev/null @@ -1,5 +0,0 @@ -[program:sshd] -command=/usr/sbin/sshd -D -stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log -stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log -autorestart=true diff --git a/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/workers.conf.j2 b/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/workers.conf.j2 index 5d880960aa4..949d3e1b81e 100644 --- a/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/workers.conf.j2 +++ b/playbooks/roles/edxapp/templates/edx/app/supervisor/conf.d.available/workers.conf.j2 @@ -1,13 +1,13 @@ {% for w in edxapp_workers %} [program:{{ w.service_variant }}_{{ w.queue }}_{{ w.concurrency }}] -environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_WORKERS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}CONCURRENCY={{ w.concurrency }},LOGLEVEL=info,DJANGO_SETTINGS_MODULE={{ worker_django_settings_module }},LANG={{ EDXAPP_LANG }},PYTHONPATH={{ edxapp_code_dir 
}},SERVICE_VARIANT={{ w.service_variant }},BOTO_CONFIG="{{ edxapp_app_dir }}/.boto," +environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_WORKERS_APPNAME }},NEW_RELIC_DISTRIBUTED_TRACING_ENABLED={{ EDXAPP_WORKERS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}CONCURRENCY={{ w.concurrency }},LOGLEVEL=info,DJANGO_SETTINGS_MODULE={{ w.service_variant }}.envs.{{ worker_django_settings_module }},LANG={{ EDXAPP_LANG }},PYTHONPATH={{ edxapp_code_dir }},SERVICE_VARIANT={{ w.service_variant }},BOTO_CONFIG="{{ edxapp_app_dir }}/.boto",EDX_REST_API_CLIENT_NAME=edx.{{ w.service_variant }}.core.{{ w.queue }} user={{ common_web_user }} directory={{ edxapp_code_dir }} stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log -command={{ edxapp_app_dir }}/worker.sh {{ w.service_variant }} --settings={{ worker_django_settings_module }} celery worker --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.%%h --concurrency={{ w.concurrency }} {{ '--maxtasksperchild ' + w.max_tasks_per_child|string if w.max_tasks_per_child is defined else '' }} {{ '-O ' + w.prefetch_optimization if w.prefetch_optimization is defined else '' }} +command={{ edxapp_app_dir }}/worker.sh --app={{ w.service_variant }}.celery:APP --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.%%h --concurrency={{ w.concurrency }} {{ '--maxtasksperchild ' + w.max_tasks_per_child|string if w.max_tasks_per_child is defined else '' }} {{ '--without-heartbeat' if not EDXAPP_CELERY_HEARTBEAT_ENABLED|bool else '' }} {{ '-O ' + w.prefetch_optimization if w.prefetch_optimization is defined else '' }} killasgroup=true stopwaitsecs={{ w.stopwaitsecs | default(EDXAPP_WORKER_DEFAULT_STOPWAITSECS) }} ; Set autorestart to 
`true`. The default value for autorestart is `unexpected`, but celery < 4.x will exit diff --git a/playbooks/roles/edxapp/templates/edx/bin/edxapp-update-assets.j2 b/playbooks/roles/edxapp/templates/edx/bin/edxapp-update-assets.j2 index a1884d7f3d3..fe3dc8fc89d 100644 --- a/playbooks/roles/edxapp/templates/edx/bin/edxapp-update-assets.j2 +++ b/playbooks/roles/edxapp/templates/edx/bin/edxapp-update-assets.j2 @@ -1,3 +1,13 @@ {% include "edxapp_common.j2" %} -sudo -E -H -u {{ edxapp_user }} env "PATH=$PATH" {{ edxapp_venv_bin }}/paver update_assets --settings $EDX_PLATFORM_SETTINGS +{% if edxapp_staticfiles_storage_overrides %} +{% for override in edxapp_staticfiles_storage_overrides %} +sudo -E -H -u {{ edxapp_user }} \ + env "PATH=$PATH" "STATICFILES_STORAGE={{ override }}" \ + {{ edxapp_venv_bin }}/paver update_assets --debug-collect --settings=$EDX_PLATFORM_SETTINGS +{% endfor %} +{% else %} +sudo -E -H -u {{ edxapp_user }} \ + env "PATH=$PATH" \ + {{ edxapp_venv_bin }}/paver update_assets --debug-collect --settings $EDX_PLATFORM_SETTINGS +{% endif %} diff --git a/playbooks/roles/edxapp/templates/lms.auth.json.j2 b/playbooks/roles/edxapp/templates/lms.auth.json.j2 index 0a7afbee9ad..c43ef92abdd 100644 --- a/playbooks/roles/edxapp/templates/lms.auth.json.j2 +++ b/playbooks/roles/edxapp/templates/lms.auth.json.j2 @@ -1,7 +1,2 @@ {% do lms_auth_config.update(EDXAPP_LMS_AUTH_EXTRA) %} -{% for key, value in lms_auth_config.iteritems() -%} - {% if value == 'None' -%} - {% do lms_auth_config.update({key: None }) %} - {%- endif %} -{%- endfor %} {{ lms_auth_config | to_nice_json }} diff --git a/playbooks/roles/edxapp/templates/lms.env.json.j2 b/playbooks/roles/edxapp/templates/lms.env.json.j2 index 64b231a622f..7f4fbeb0449 100644 --- a/playbooks/roles/edxapp/templates/lms.env.json.j2 +++ b/playbooks/roles/edxapp/templates/lms.env.json.j2 @@ -1,7 +1,2 @@ {% do lms_env_config.update(EDXAPP_LMS_ENV_EXTRA) %} -{% for key, value in lms_env_config.iteritems() -%} - {% if value 
== 'None' -%} - {% do lms_env_config.update({key: None }) %} - {%- endif %} -{%- endfor %} {{ lms_env_config | to_nice_json }} diff --git a/playbooks/roles/edxapp/templates/lms.yml.j2 b/playbooks/roles/edxapp/templates/lms.yml.j2 new file mode 100644 index 00000000000..8f9b569db83 --- /dev/null +++ b/playbooks/roles/edxapp/templates/lms.yml.j2 @@ -0,0 +1,4 @@ +{% if lms_combined_config %} +{% do lms_combined_config.update(EDXAPP_LMS_ENV_EXTRA) %} +{{ lms_combined_config | to_nice_yaml }} +{% endif %} diff --git a/playbooks/roles/edxapp/templates/lms_gunicorn.py.j2 b/playbooks/roles/edxapp/templates/lms_gunicorn.py.j2 index 609d22e46b2..fa20316cf27 100644 --- a/playbooks/roles/edxapp/templates/lms_gunicorn.py.j2 +++ b/playbooks/roles/edxapp/templates/lms_gunicorn.py.j2 @@ -1,5 +1,5 @@ """ -gunicorn configuration file: http://docs.gunicorn.org/en/develop/configure.html +gunicorn configuration file: http://docs.gunicorn.org/en/stable/configure.html {{ ansible_managed }} """ @@ -25,6 +25,8 @@ workers = {{ EDXAPP_WORKERS.lms }} workers = (multiprocessing.cpu_count()-1) * {{ worker_core_mult.lms }} + {{ worker_core_mult.lms }} {% endif %} +{{ common_pre_request }} + {{ common_close_all_caches }} def post_fork(server, worker): diff --git a/playbooks/roles/edxapp/templates/revisions.yml.j2 b/playbooks/roles/edxapp/templates/revisions.yml.j2 new file mode 100644 index 00000000000..043bac70c21 --- /dev/null +++ b/playbooks/roles/edxapp/templates/revisions.yml.j2 @@ -0,0 +1,3 @@ +{% if edxapp_revisions_config %} +{{ edxapp_revisions_config | to_nice_yaml }} +{% endif %} diff --git a/playbooks/roles/edxapp/templates/studio.yml.j2 b/playbooks/roles/edxapp/templates/studio.yml.j2 new file mode 100644 index 00000000000..08f5c79cd08 --- /dev/null +++ b/playbooks/roles/edxapp/templates/studio.yml.j2 @@ -0,0 +1,4 @@ +{% if cms_combined_config %} +{% do cms_combined_config.update(EDXAPP_CMS_ENV_EXTRA) %} +{{ cms_combined_config | to_nice_yaml }} +{% endif %} diff --git 
a/playbooks/roles/edxapp_common/tasks/main.yml b/playbooks/roles/edxapp_common/tasks/main.yml index 343b8a032c7..b76c45c6960 100644 --- a/playbooks/roles/edxapp_common/tasks/main.yml +++ b/playbooks/roles/edxapp_common/tasks/main.yml @@ -1,10 +1,9 @@ --- - name: Install system packages apt: - name: "{{ item }}" + name: "{{ edxapp_common_debian_pkgs }}" state: present update_cache: yes - with_items: "{{ edxapp_common_debian_pkgs }}" tags: - install - install:base diff --git a/playbooks/roles/edxlocal/defaults/main.yml b/playbooks/roles/edxlocal/defaults/main.yml index 96201a52d29..d83b6c12dad 100644 --- a/playbooks/roles/edxlocal/defaults/main.yml +++ b/playbooks/roles/edxlocal/defaults/main.yml @@ -12,10 +12,11 @@ edxlocal_databases: - "{{ EDX_NOTES_API_MYSQL_DB_NAME | default(None) }}" - "{{ ANALYTICS_API_DEFAULT_DB_NAME | default(None) }}" - "{{ ANALYTICS_API_REPORTS_DB_NAME | default(None) }}" + - "{{ BLOCKSTORE_DEFAULT_DB_NAME | default(None) }}" - "{{ CREDENTIALS_DEFAULT_DB_NAME | default(None) }}" - "{{ DISCOVERY_DEFAULT_DB_NAME | default(None) }}" - - "{{ JOURNALS_DEFAULT_DB_NAME | default(None) }}" - "{{ VEDA_WEB_FRONTEND_DEFAULT_DB_NAME | default(None) }}" + - "{{ REGISTRAR_DEFAULT_DB_NAME | default(None) }}" edxlocal_database_users: - { @@ -53,6 +54,11 @@ edxlocal_database_users: user: "{{ HIVE_METASTORE_DATABASE_USER | default(None) }}", pass: "{{ HIVE_METASTORE_DATABASE_PASSWORD | default(None) }}" } + - { + db: "{{ BLOCKSTORE_DEFAULT_DB_NAME | default(None) }}", + user: "{{ BLOCKSTORE_DATABASE_USER | default(None) }}", + pass: "{{ BLOCKSTORE_DATABASE_PASSWORD | default(None) }}" + } - { db: "{{ CREDENTIALS_DEFAULT_DB_NAME | default(None) }}", user: "{{ CREDENTIALS_MYSQL_USER | default(None) }}", @@ -63,13 +69,13 @@ edxlocal_database_users: user: "{{ DISCOVERY_MYSQL_USER | default(None) }}", pass: "{{ DISCOVERY_MYSQL_PASSWORD | default(None) }}" } - - { - db: "{{ JOURNALS_DEFAULT_DB_NAME | default(None) }}", - user: "{{ JOURNALS_MYSQL_USER | 
default(None) }}", - pass: "{{ JOURNALS_MYSQL_PASSWORD | default(None) }}" - } - { db: "{{ VEDA_WEB_FRONTEND_DEFAULT_DB_NAME | default(None) }}", user: "{{ VEDA_WEB_FRONTEND_MYSQL_USER | default(None) }}", pass: "{{ VEDA_WEB_FRONTEND_MYSQL_PASSWORD | default(None) }}" } + - { + db: "{{ REGISTRAR_DEFAULT_DB_NAME | default(None) }}", + user: "{{ REGISTRAR_MYSQL_USER | default(None) }}", + pass: "{{ REGISTRAR_MYSQL_PASSWORD | default(None) }}" + } diff --git a/playbooks/roles/edxlocal/tasks/main.yml b/playbooks/roles/edxlocal/tasks/main.yml index b5efdbe1f8b..de2f4f1a44f 100644 --- a/playbooks/roles/edxlocal/tasks/main.yml +++ b/playbooks/roles/edxlocal/tasks/main.yml @@ -41,14 +41,14 @@ name: "{{ ANALYTICS_API_DATABASES.default.USER }}" password: "{{ ANALYTICS_API_DATABASES.default.PASSWORD }}" priv: '{{ ANALYTICS_API_DATABASES.default.NAME }}.*:ALL/reports.*:SELECT' - when: ANALYTICS_API_DATABASES.default is defined + when: ANALYTICS_API_DATABASES is defined and ANALYTICS_API_DATABASES.default is defined - name: create read-only reports user for the analytics-api mysql_user: name: "{{ ANALYTICS_API_DATABASES.reports.USER }}" password: "{{ ANALYTICS_API_DATABASES.reports.PASSWORD }}" priv: '{{ ANALYTICS_API_DATABASES.reports.NAME }}.*:SELECT' - when: ANALYTICS_API_DATABASES.reports is defined + when: ANALYTICS_API_DATABASES is defined and ANALYTICS_API_DATABASES.reports is defined - name: create a database for the hive metastore mysql_db: @@ -68,7 +68,7 @@ mysql_user: name: "{{ COMMON_MYSQL_READ_ONLY_USER }}" password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}" - priv: "*.*:ALL" + priv: "*.*:SELECT" - name: setup the admin db user mysql_user: diff --git a/playbooks/roles/elasticsearch/tasks/main.yml b/playbooks/roles/elasticsearch/tasks/main.yml index d789f7bbef0..cbf3e35c536 100644 --- a/playbooks/roles/elasticsearch/tasks/main.yml +++ b/playbooks/roles/elasticsearch/tasks/main.yml @@ -57,6 +57,11 @@ - install:base register: elasticsearch_reinstall + # Prevent 
elasticsearch from being upgraded. +- dpkg_selections: + name: elasticsearch + selection: hold + - name: create directories file: path: "{{ item }}" diff --git a/playbooks/roles/enhanced_networking/tasks/main.yml b/playbooks/roles/enhanced_networking/tasks/main.yml index 7cfac06b62a..0834f404fac 100644 --- a/playbooks/roles/enhanced_networking/tasks/main.yml +++ b/playbooks/roles/enhanced_networking/tasks/main.yml @@ -23,7 +23,7 @@ # Uses local actions so multiple actions can be done in parallel. # - name: Get ec2 facts - ec2_facts: + ec2_metadata_facts: - name: Test for enhanced networking local_action: diff --git a/playbooks/roles/enterprise_catalog/defaults/main.yml b/playbooks/roles/enterprise_catalog/defaults/main.yml new file mode 100644 index 00000000000..c4fe3596b74 --- /dev/null +++ b/playbooks/roles/enterprise_catalog/defaults/main.yml @@ -0,0 +1,159 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role enterprise_catalog +# + + +# +# vars are namespace with the module name. 
+# +enterprise_catalog_service_name: 'enterprise_catalog' + +enterprise_catalog_user: "{{ enterprise_catalog_service_name }}" +enterprise_catalog_home: "{{ COMMON_APP_DIR }}/{{ enterprise_catalog_service_name }}" +enterprise_catalog_app_dir: "{{ COMMON_APP_DIR }}/{{ enterprise_catalog_service_name }}" +enterprise_catalog_code_dir: "{{ enterprise_catalog_app_dir }}/{{ enterprise_catalog_service_name }}" +enterprise_catalog_venvs_dir: "{{ enterprise_catalog_app_dir }}/venvs" +enterprise_catalog_venv_dir: "{{ enterprise_catalog_venvs_dir }}/enterprise_catalog" + +enterprise_catalog_celery_default_queue: 'enterprise_catalog.default' + +ENTERPRISE_CATALOG_CELERY_ALWAYS_EAGER: false +ENTERPRISE_CATALOG_CELERY_BROKER_TRANSPORT: '' +ENTERPRISE_CATALOG_CELERY_BROKER_USER: '' +ENTERPRISE_CATALOG_CELERY_BROKER_PASSWORD: '' +ENTERPRISE_CATALOG_CELERY_BROKER_HOSTNAME: '' +ENTERPRISE_CATALOG_CELERY_BROKER_VHOST: '' + +enterprise_catalog_environment: + ENTERPRISE_CATALOG_CFG: '{{ COMMON_CFG_DIR }}/{{ enterprise_catalog_service_name }}.yml' + +enterprise_catalog_gunicorn_port: 8160 + +enterprise_catalog_debian_pkgs: [] + +ENTERPRISE_CATALOG_ENABLE_EXPERIMENTAL_DOCKER_SHIM: false + +ENTERPRISE_CATALOG_NGINX_PORT: '1{{ enterprise_catalog_gunicorn_port }}' +ENTERPRISE_CATALOG_SSL_NGINX_PORT: '4{{ enterprise_catalog_gunicorn_port }}' + +ENTERPRISE_CATALOG_DEFAULT_DB_NAME: 'enterprise_catalog' +ENTERPRISE_CATALOG_MYSQL_HOST: 'localhost' +# MySQL usernames are limited to 16 characters +ENTERPRISE_CATALOG_MYSQL_USER: 'entcatalog001' +ENTERPRISE_CATALOG_MYSQL_PASSWORD: 'password' + +ENTERPRISE_CATALOG_MEMCACHE: [ 'memcache' ] + +ENTERPRISE_CATALOG_DJANGO_SETTINGS_MODULE: 'enterprise_catalog.settings.production' +ENTERPRISE_CATALOG_DOMAIN: 'localhost' +ENTERPRISE_CATALOG_URL_ROOT: 'http://{{ ENTERPRISE_CATALOG_DOMAIN }}:{{ ENTERPRISE_CATALOG_NGINX_PORT }}' +ENTERPRISE_CATALOG_LOGOUT_URL: '{{ ENTERPRISE_CATALOG_URL_ROOT }}/logout/' + +ENTERPRISE_CATALOG_LANGUAGE_CODE: 'en' 
+ENTERPRISE_CATALOG_LANGUAGE_COOKIE_NAME: 'openedx-language-preference' + +ENTERPRISE_CATALOG_SERVICE_USER: 'enterprise_catalog_service_user' + +ENTERPRISE_CATALOG_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ enterprise_catalog_service_name }}' +ENTERPRISE_CATALOG_MEDIA_ROOT: '{{ ENTERPRISE_CATALOG_DATA_DIR }}/media' +ENTERPRISE_CATALOG_MEDIA_URL: '/media/' + +ENTERPRISE_CATALOG_MEDIA_STORAGE_BACKEND: +DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' +MEDIA_ROOT: '{{ ENTERPRISE_CATALOG_MEDIA_ROOT }}' +MEDIA_URL: '{{ ENTERPRISE_CATALOG_MEDIA_URL }}' + +# TODO: Let edx_django_service manage ENTERPRISE_CATALOG_STATIC_ROOT in phase 2. +ENTERPRISE_CATALOG_STATIC_ROOT: '{{ ENTERPRISE_CATALOG_DATA_DIR }}/staticfiles' +ENTERPRISE_CATALOG_STATIC_URL: '/static/' + +ENTERPRISE_CATALOG_STATICFILES_STORAGE: 'django.contrib.staticfiles.storage.StaticFilesStorage' + +ENTERPRISE_CATALOG_CORS_ORIGIN_ALLOW_ALL: false +ENTERPRISE_CATALOG_CORS_ORIGIN_WHITELIST_DEFAULT: +- '{{ ENTERPRISE_CATALOG_DOMAIN }}' + +ENTERPRISE_CATALOG_CORS_ORIGIN_WHITELIST_EXTRA: [] +ENTERPRISE_CATALOG_CORS_ORIGIN_WHITELIST: '{{ ENTERPRISE_CATALOG_CORS_ORIGIN_WHITELIST_DEFAULT + ENTERPRISE_CATALOG_CORS_ORIGIN_WHITELIST_EXTRA }}' + +ENTERPRISE_CATALOG_VERSION: 'master' + +ENTERPRISE_CATALOG_GUNICORN_EXTRA: '' + +ENTERPRISE_CATALOG_EXTRA_APPS: [] + +ENTERPRISE_CATALOG_SESSION_EXPIRE_AT_BROWSER_CLOSE: false + +ENTERPRISE_CATALOG_CERTIFICATE_LANGUAGES: +'en': 'English' +'es_419': 'Spanish' + +enterprise_catalog_service_config_overrides: + CERTIFICATE_LANGUAGES: '{{ ENTERPRISE_CATALOG_CERTIFICATE_LANGUAGES }}' + ENTERPRISE_CATALOG_SERVICE_USER: '{{ ENTERPRISE_CATALOG_SERVICE_USER }}' + LANGUAGE_COOKIE_NAME: '{{ ENTERPRISE_CATALOG_LANGUAGE_COOKIE_NAME }}' + CSRF_COOKIE_SECURE: "{{ ENTERPRISE_CATALOG_CSRF_COOKIE_SECURE }}" + CELERY_ALWAYS_EAGER: '{{ ENTERPRISE_CATALOG_CELERY_ALWAYS_EAGER }}' + CELERY_BROKER_TRANSPORT: '{{ ENTERPRISE_CATALOG_CELERY_BROKER_TRANSPORT }}' + CELERY_BROKER_USER: '{{ 
ENTERPRISE_CATALOG_CELERY_BROKER_USER }}' + CELERY_BROKER_PASSWORD: '{{ ENTERPRISE_CATALOG_CELERY_BROKER_PASSWORD }}' + CELERY_BROKER_HOSTNAME: '{{ ENTERPRISE_CATALOG_CELERY_BROKER_HOSTNAME }}' + CELERY_BROKER_VHOST: '{{ ENTERPRISE_CATALOG_CELERY_BROKER_VHOST }}' + CELERY_DEFAULT_EXCHANGE: 'enterprise_catalog' + CELERY_DEFAULT_ROUTING_KEY: 'enterprise_catalog' + CELERY_DEFAULT_QUEUE: '{{ enterprise_catalog_celery_default_queue }}' + +# See edx_django_service_automated_users for an example of what this should be +ENTERPRISE_CATALOG_AUTOMATED_USERS: {} + +# NOTE: These variables are only needed to create the demo site (e.g. for sandboxes) +ENTERPRISE_CATALOG_LMS_URL_ROOT: !!null +ENTERPRISE_CATALOG_DISCOVERY_API_URL: !!null + +ENTERPRISE_CATALOG_CSRF_COOKIE_SECURE: false + +ENTERPRISE_CATALOG_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +enterprise_catalog_post_migrate_commands: [] + +ENTERPRISE_CATALOG_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'enterprise-catalog-sso-key' +ENTERPRISE_CATALOG_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'enterprise-catalog-sso-secret' +ENTERPRISE_CATALOG_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'enterprise-catalog-backend-service-key' +ENTERPRISE_CATALOG_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'enterprise-catalog-backend-service-secret' +ENTERPRISE_CATALOG_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false + +ENTERPRISE_CATALOG_GIT_IDENTITY: !!null + +ENTERPRISE_CATALOG_REPOS: + - PROTOCOL: '{{ COMMON_GIT_PROTOCOL }}' + DOMAIN: '{{ COMMON_GIT_MIRROR }}' + PATH: '{{ COMMON_GIT_PATH }}' + REPO: 'enterprise-catalog.git' + VERSION: '{{ ENTERPRISE_CATALOG_VERSION }}' + DESTINATION: "{{ enterprise_catalog_code_dir }}" + SSH_KEY: '{{ ENTERPRISE_CATALOG_GIT_IDENTITY }}' + +ENTERPRISE_CATALOG_SECRET_KEY: 'SET-ME-PLEASE' + +# Remote config +ENTERPRISE_CATALOG_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +ENTERPRISE_CATALOG_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +ENTERPRISE_CATALOG_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +# Worker settings 
+worker_django_settings_module: "{{ ENTERPRISE_CATALOG_DJANGO_SETTINGS_MODULE }}" +ENTERPRISE_CATALOG_CELERY_WORKERS: + - queue: '{{ enterprise_catalog_celery_default_queue }}' + concurrency: 1 + monitor: True +enterprise_catalog_workers: "{{ ENTERPRISE_CATALOG_CELERY_WORKERS }}" diff --git a/playbooks/roles/enterprise_catalog/meta/main.yml b/playbooks/roles/enterprise_catalog/meta/main.yml new file mode 100644 index 00000000000..03cd124f925 --- /dev/null +++ b/playbooks/roles/enterprise_catalog/meta/main.yml @@ -0,0 +1,54 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role enterprise_catalog + +dependencies: + - role: edx_django_service + edx_django_service_enable_experimental_docker_shim: '{{ ENTERPRISE_CATALOG_ENABLE_EXPERIMENTAL_DOCKER_SHIM }}' + edx_django_service_version: '{{ ENTERPRISE_CATALOG_VERSION }}' + edx_django_service_name: '{{ enterprise_catalog_service_name }}' + edx_django_service_config_overrides: '{{ enterprise_catalog_service_config_overrides }}' + edx_django_service_debian_pkgs_extra: '{{ enterprise_catalog_debian_pkgs }}' + edx_django_service_gunicorn_port: '{{ enterprise_catalog_gunicorn_port }}' + edx_django_service_repos: '{{ ENTERPRISE_CATALOG_REPOS }}' + edx_django_service_django_settings_module: '{{ ENTERPRISE_CATALOG_DJANGO_SETTINGS_MODULE }}' + edx_django_service_environment_extra: '{{ enterprise_catalog_environment }}' + edx_django_service_gunicorn_extra: '{{ ENTERPRISE_CATALOG_GUNICORN_EXTRA }}' + edx_django_service_nginx_port: '{{ ENTERPRISE_CATALOG_NGINX_PORT }}' + edx_django_service_ssl_nginx_port: '{{ ENTERPRISE_CATALOG_SSL_NGINX_PORT }}' + edx_django_service_language_code: '{{ ENTERPRISE_CATALOG_LANGUAGE_CODE }}' + edx_django_service_secret_key: 
'{{ ENTERPRISE_CATALOG_SECRET_KEY }}' + edx_django_service_media_storage_backend: '{{ ENTERPRISE_CATALOG_MEDIA_STORAGE_BACKEND }}' + edx_django_service_staticfiles_storage: '{{ ENTERPRISE_CATALOG_STATICFILES_STORAGE }}' + edx_django_service_memcache: '{{ ENTERPRISE_CATALOG_MEMCACHE }}' + edx_django_service_default_db_host: '{{ ENTERPRISE_CATALOG_MYSQL_HOST }}' + edx_django_service_default_db_name: '{{ ENTERPRISE_CATALOG_DEFAULT_DB_NAME }}' + edx_django_service_default_db_atomic_requests: false + edx_django_service_db_user: '{{ ENTERPRISE_CATALOG_MYSQL_USER }}' + edx_django_service_db_password: '{{ ENTERPRISE_CATALOG_MYSQL_PASSWORD }}' + edx_django_service_extra_apps: '{{ ENTERPRISE_CATALOG_EXTRA_APPS }}' + edx_django_service_session_expire_at_browser_close: '{{ ENTERPRISE_CATALOG_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' + edx_django_service_social_auth_edx_oauth2_key: '{{ ENTERPRISE_CATALOG_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ ENTERPRISE_CATALOG_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ ENTERPRISE_CATALOG_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ ENTERPRISE_CATALOG_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + edx_django_service_automated_users: '{{ ENTERPRISE_CATALOG_AUTOMATED_USERS }}' + edx_django_service_cors_whitelist: '{{ ENTERPRISE_CATALOG_CORS_ORIGIN_WHITELIST }}' + edx_django_service_post_migrate_commands: '{{ enterprise_catalog_post_migrate_commands }}' + edx_django_service_enable_newrelic_distributed_tracing: '{{ ENTERPRISE_CATALOG_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_decrypt_config_enabled: '{{ ENTERPRISE_CATALOG_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ ENTERPRISE_CATALOG_COPY_CONFIG_ENABLED }}' + edx_django_service_migration_check_services: '{{ enterprise_catalog_service_name }},{{ enterprise_catalog_service_name }}_workers' + 
edx_django_service_enable_celery_workers: true + edx_django_service_workers: '{{ enterprise_catalog_workers }}' + # Need this override because the existing AWS ASGs have a services tag with a name that doesn't match the convention + edx_django_service_workers_supervisor_conf: 'enterprise_catalog_worker.conf' + edx_django_service_docker_image_name: 'openedx/enterprise-catalog' diff --git a/playbooks/roles/container/meta/main.yml b/playbooks/roles/enterprise_catalog/tasks/main.yml similarity index 64% rename from playbooks/roles/container/meta/main.yml rename to playbooks/roles/enterprise_catalog/tasks/main.yml index 3b76e095663..44f4dcdd5c0 100644 --- a/playbooks/roles/container/meta/main.yml +++ b/playbooks/roles/enterprise_catalog/tasks/main.yml @@ -7,16 +7,16 @@ # code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions # license: https://github.com/edx/configuration/blob/master/LICENSE.TXT # -## -# Role includes for role container -# -# Example: -# -# dependencies: -# - { -# role: my_role -# my_role_var0: "foo" -# my_role_var1: "bar" -# } -dependencies: - - common +# +# +# Tasks for role enterprise_catalog +# +# Overview: This role's tasks come from edx_django_service. +# +# +# Dependencies: +# +# +# Example play: +# +# diff --git a/playbooks/roles/forum/defaults/main.yml b/playbooks/roles/forum/defaults/main.yml index daa6bd790b4..b691bb98098 100644 --- a/playbooks/roles/forum/defaults/main.yml +++ b/playbooks/roles/forum/defaults/main.yml @@ -19,6 +19,10 @@ FORUM_MONGO_HOSTS: FORUM_MONGO_TAGS: !!null FORUM_MONGO_PORT: "27017" FORUM_MONGO_DATABASE: "cs_comments_service" +FORUM_MONGO_AUTH_DB: "" +# Must be set if user credentials are provided. +# Can be one of :scram, :mongodb_cr, :mongodb_x509, :plain, or empty string "" if no credentials. 
+FORUM_MONGO_AUTH_MECH: ":scram" FORUM_MONGO_USE_SSL: false FORUM_MONGO_SSL_VERIFY: true FORUM_MONGO_AUTH_SOURCE: "{{ FORUM_MONGO_DATABASE }}" @@ -63,8 +67,9 @@ forum_base_env: &forum_base_env MONGOHQ_URL: "{{ FORUM_MONGO_URL }}" MONGOID_USE_SSL: "{{ FORUM_MONGO_USE_SSL }}" MONGOID_SSL_VERIFY: "{{ FORUM_MONGO_SSL_VERIFY }}" - MONGOID_AUTH_SOURCE: "{{ FORUM_MONGO_AUTH_SOURCE }}" MONGOID_REPLICA_SET: "{{ FORUM_MONGO_REPLICA_SET }}" + MONGOID_AUTH_SOURCE: "{{ FORUM_MONGO_AUTH_DB }}" + MONGOID_AUTH_MECH: "{{ FORUM_MONGO_AUTH_MECH }}" HOME: "{{ forum_app_dir }}" NEW_RELIC_ENABLE: "{{ FORUM_NEW_RELIC_ENABLE }}" NEW_RELIC_APP_NAME: "{{ FORUM_NEW_RELIC_APP_NAME }}" @@ -83,11 +88,14 @@ devstack_forum_env: SINATRA_ENV: "development" SEARCH_SERVER: "http://edx.devstack.elasticsearch:9200/" MONGOHQ_URL: "mongodb://cs_comments_service:password@edx.devstack.mongo:27017/cs_comments_service" + MONGOID_AUTH_MECH: "{{ FORUM_MONGO_AUTH_MECH }}" forum_user: "forum" -forum_ruby_version: "2.4.1" +FORUM_RUBY_VERSION: "2.5.7" +forum_ruby_version: "{{ FORUM_RUBY_VERSION }}" forum_source_repo: "https://github.com/edx/cs_comments_service.git" -forum_version: "master" +FORUM_VERSION: "master" +forum_version: "{{ FORUM_VERSION }}" # # test config diff --git a/playbooks/roles/forum/tasks/main.yml b/playbooks/roles/forum/tasks/main.yml index 787fab76309..087604e5da1 100644 --- a/playbooks/roles/forum/tasks/main.yml +++ b/playbooks/roles/forum/tasks/main.yml @@ -44,6 +44,20 @@ - install - install:base + +- name: write devstack script + template: + src: "devstack.sh.j2" + dest: "{{ forum_app_dir }}/devstack.sh" + owner: "{{ supervisor_user }}" + group: "{{ common_web_user }}" + mode: 0744 + when: devstack is defined and devstack + tags: + - devstack + - devstack:install + + - name: setup the forum env for stage/prod template: src: forum_env.j2 diff --git a/playbooks/roles/forum/tasks/tag_ec2.yml b/playbooks/roles/forum/tasks/tag_ec2.yml index 7466c22df4a..c064d55e798 100644 --- 
a/playbooks/roles/forum/tasks/tag_ec2.yml +++ b/playbooks/roles/forum/tasks/tag_ec2.yml @@ -1,6 +1,6 @@ --- - name: get instance information - action: ec2_facts + action: ec2_metadata_facts - name: tag instance ec2_tag: diff --git a/playbooks/roles/forum/templates/devstack.sh.j2 b/playbooks/roles/forum/templates/devstack.sh.j2 new file mode 100644 index 00000000000..ab025fa7256 --- /dev/null +++ b/playbooks/roles/forum/templates/devstack.sh.j2 @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +source {{ forum_app_dir }}/devstack_forum_env +COMMAND=$1 + +case $COMMAND in + start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; + open) + cd {{ forum_code_dir }} + + /bin/bash + ;; + exec) + shift + + cd {{ forum_code_dir }} + + "$@" + ;; + *) + "$@" + ;; +esac diff --git a/playbooks/roles/gh_mirror/files/repos_from_orgs.py b/playbooks/roles/gh_mirror/files/repos_from_orgs.py index a1db309a555..35a41cf5dae 100644 --- a/playbooks/roles/gh_mirror/files/repos_from_orgs.py +++ b/playbooks/roles/gh_mirror/files/repos_from_orgs.py @@ -6,6 +6,8 @@ # a yaml file containing a list of # github organizations +from __future__ import absolute_import +from __future__ import print_function import yaml import sys import requests @@ -49,9 +51,9 @@ def refresh_cache(): path = dirname(abspath(__file__)) try: with open(join(path, 'orgs.yml')) as f: - orgs = yaml.load(f) + orgs = yaml.safe_load(f) except IOError: - print "Unable to read {}/orgs.yml, does it exist?".format(path) + print("Unable to read {}/orgs.yml, does it exist?".format(path)) sys.exit(1) repos = [] @@ -97,7 +99,7 @@ def update_repos(): else: check_running() if not args.datadir: - print "Please specificy a repository directory" + print("Please specify a repository directory") sys.exit(1) if not os.path.exists('/var/tmp/repos.json'): refresh_cache() diff --git a/playbooks/roles/ghost/defaults/main.yml
b/playbooks/roles/ghost/defaults/main.yml new file mode 100644 index 00000000000..ff9f7b42ff1 --- /dev/null +++ b/playbooks/roles/ghost/defaults/main.yml @@ -0,0 +1,5 @@ +--- +GHOST_VERSION: 1.0.48 +ghost_package_name: gh-ost +ghost_download_target: "/tmp/{{ ghost_package_name }}_{{ GHOST_VERSION }}.deb" +ghost_package_url: https://github.com/github/gh-ost/releases/download/v{{ GHOST_VERSION }}/gh-ost_{{ GHOST_VERSION }}_amd64.deb diff --git a/playbooks/roles/ghost/tasks/main.yml b/playbooks/roles/ghost/tasks/main.yml new file mode 100644 index 00000000000..0e2082709ba --- /dev/null +++ b/playbooks/roles/ghost/tasks/main.yml @@ -0,0 +1,27 @@ +- name: Check if package is installed and version is correct + command: dpkg -s gh-ost | grep Version | cut -d ':' -f 3 | grep {{ GHOST_VERSION }} + register: ghost_correct + ignore_errors: True + +- name: Remove package if exists + apt: + name: "{{ ghost_package_name }}" + state: absent + when: ghost_correct.rc != 0 + +- name: Download package + get_url: + url="{{ ghost_package_url }}" + dest="{{ ghost_download_target }}" + when: ghost_correct.rc != 0 + +- name: Install ghost_package + apt: deb="{{ ghost_download_target }}" + sudo: true + when: ghost_correct.rc != 0 + +- name: "Install percona packages for dropping large tables" + apt: + name: + - "percona-toolkit" + state: "present" diff --git a/playbooks/roles/gitreload/defaults/main.yml b/playbooks/roles/gitreload/defaults/main.yml index 59d546cfdb6..a893931e7a5 100644 --- a/playbooks/roles/gitreload/defaults/main.yml +++ b/playbooks/roles/gitreload/defaults/main.yml @@ -9,7 +9,7 @@ # ## # Defaults for role gitreload -# +# GITRELOAD_COURSE_CHECKOUT: false GITRELOAD_GIT_IDENTITY: !!null @@ -26,7 +26,8 @@ GITRELOAD_NGINX_PORT: "18095" GITRELOAD_GUNICORN_EXTRA: "" GITRELOAD_GUNICORN_EXTRA_CONF: "" -gitreload_version: "master" +GITRELOAD_VERSION: "master" +gitreload_version: "{{ GITRELOAD_VERSION }}" gitreload_dir: "{{ COMMON_APP_DIR }}/gitreload" gitreload_user: "gitreload" 
gitreload_repo: "https://github.com/mitodl/gitreload.git" diff --git a/playbooks/roles/go-agent/meta/main.yml b/playbooks/roles/go-agent/meta/main.yml index 59204240bb6..7ac691de9da 100644 --- a/playbooks/roles/go-agent/meta/main.yml +++ b/playbooks/roles/go-agent/meta/main.yml @@ -9,12 +9,12 @@ # ## # Role includes for role gocd -# +# # Example: # # dependencies: # - { -# role: my_role +# role: my_role # my_role_var0: "foo" # my_role_var1: "bar" # } diff --git a/playbooks/roles/go-server/defaults/main.yml b/playbooks/roles/go-server/defaults/main.yml index b969ebfe9a5..b7149e8fc16 100644 --- a/playbooks/roles/go-server/defaults/main.yml +++ b/playbooks/roles/go-server/defaults/main.yml @@ -83,7 +83,7 @@ GO_SERVER_BACKUP_API_URL: "http://localhost:8153/go/api/backups" GO_SERVER_BACKUP_TMP_LOCATION: "/tmp/{{ GO_SERVER_BACKUP_FILENAME }}" GO_SERVER_BACKUP_CRON_SCRIPT_LOCATION: "/root/gocd_backup.sh" GO_SERVER_BACKUP_CRON_HOUR: "23" -GO_SERVER_BACKUP_CRON_MINUTE: "58" +GO_SERVER_BACKUP_CRON_MINUTE: "30" # When "true", attempts to restore go-server backup from S3. 
GO_SERVER_RESTORE_BACKUP: false diff --git a/playbooks/roles/go-server/meta/main.yml b/playbooks/roles/go-server/meta/main.yml index 48dfc9bbb4b..5127feb5689 100644 --- a/playbooks/roles/go-server/meta/main.yml +++ b/playbooks/roles/go-server/meta/main.yml @@ -18,3 +18,6 @@ # my_role_var0: "foo" # my_role_var1: "bar" # } + +dependencies: + - common diff --git a/playbooks/roles/go-server/tasks/main.yml b/playbooks/roles/go-server/tasks/main.yml index 592031cc33c..f5c4a1286a3 100644 --- a/playbooks/roles/go-server/tasks/main.yml +++ b/playbooks/roles/go-server/tasks/main.yml @@ -30,17 +30,17 @@ # - common # -- name: install go-server aptitude repository - apt_repository: - repo: "{{ GO_SERVER_APT_SOURCE }}" +- name: install aptitude key for go-server/go-server + apt_key: + url: "{{ GO_SERVER_APT_KEY_URL }}" state: present tags: - install - install:base - -- name: install aptitude key for go-server/go-server - apt_key: - url: "{{ GO_SERVER_APT_KEY_URL }}" + +- name: install go-server aptitude repository + apt_repository: + repo: "{{ GO_SERVER_APT_SOURCE }}" state: present tags: - install @@ -127,7 +127,7 @@ when: GO_SERVER_BACKUP_S3_BUCKET and GO_SERVER_BACKUP_S3_OBJECT and GO_SERVER_RESTORE_BACKUP - name: generate lines for go-server password file for users - command: "/usr/bin/htpasswd -nbs '{{ item.username }}' '{{ item.password }}'" + command: "/usr/bin/htpasswd -nbB '{{ item.username }}' '{{ item.password }}'" with_items: "{{ GO_SERVER_USERS }}" when: item.username and item.password register: go_server_users_htpasswds @@ -150,6 +150,16 @@ - install - install:app-configuration +- name: create ssh directory + file: + path: "/var/go/.ssh" + state: directory + mode: "0755" + owner: "{{ GO_SERVER_USER }}" + group: "{{ GO_SERVER_GROUP }}" + tags: + - git_identity + - name: install ssh key for the secure repos copy: content: "{{ GO_GIT_KEY }}" diff --git a/playbooks/roles/grafana/tasks/main.yml b/playbooks/roles/grafana/tasks/main.yml index 7a5dd54414a..5dfe4ef3476 
100644 --- a/playbooks/roles/grafana/tasks/main.yml +++ b/playbooks/roles/grafana/tasks/main.yml @@ -25,20 +25,18 @@ msg: "this playbook can only be run on an Ubuntu host" when: ansible_distribution != "Ubuntu" -- name: install PackageCloud GPG key +- name: install packages.grafana.com GPG key apt_key: - id: "418A7F2FB0E1E6E7EABF6FE8C2E73424D59097AB" - url: "https://packagecloud.io/gpg.key" + url: "https://packages.grafana.com/gpg.key" state: present tags: - install - install:system-requirements -- name: install PackageCloud PPA +- name: install packages.grafana.com PPA apt_repository: - # This is Debian Jessie repository, when we use Ubuntu Xenial, yes. It's the latest Debian repository they - # populate for their official packages. It does work, so, *shrug*. - repo: "deb https://packagecloud.io/grafana/stable/debian/ jessie main" + # PackageCloud isn't enabled anymore, so using packages.grafana.com(official) instead + repo: "deb https://packages.grafana.com/oss/deb stable main" update_cache: yes state: present tags: diff --git a/playbooks/roles/hadoop_common/templates/core-site.xml.j2 b/playbooks/roles/hadoop_common/templates/core-site.xml.j2 index 40aa4cba9e1..8d97d12e430 100644 --- a/playbooks/roles/hadoop_common/templates/core-site.xml.j2 +++ b/playbooks/roles/hadoop_common/templates/core-site.xml.j2 @@ -3,7 +3,7 @@ -{% for key, value in HADOOP_CORE_SITE_DEFAULT_CONFIG.iteritems() %} +{% for key, value in HADOOP_CORE_SITE_DEFAULT_CONFIG.items() %} {{ key }} {{ value }} diff --git a/playbooks/roles/hadoop_common/templates/hdfs-site.xml.j2 b/playbooks/roles/hadoop_common/templates/hdfs-site.xml.j2 index f8e758ab8ba..895f7a57979 100644 --- a/playbooks/roles/hadoop_common/templates/hdfs-site.xml.j2 +++ b/playbooks/roles/hadoop_common/templates/hdfs-site.xml.j2 @@ -2,7 +2,7 @@ -{% for key, value in HDFS_SITE_DEFAULT_CONFIG.iteritems() %} +{% for key, value in HDFS_SITE_DEFAULT_CONFIG.items() %} {{ key }} {{ value }} diff --git 
a/playbooks/roles/hadoop_common/templates/mapred-site.xml.j2 b/playbooks/roles/hadoop_common/templates/mapred-site.xml.j2 index cb28b54a7bd..e791db6f4cf 100644 --- a/playbooks/roles/hadoop_common/templates/mapred-site.xml.j2 +++ b/playbooks/roles/hadoop_common/templates/mapred-site.xml.j2 @@ -2,7 +2,7 @@ -{% for key, value in MAPRED_SITE_DEFAULT_CONFIG.iteritems() %} +{% for key, value in MAPRED_SITE_DEFAULT_CONFIG.items() %} {{ key }} {{ value }} diff --git a/playbooks/roles/hadoop_common/templates/yarn-site.xml.j2 b/playbooks/roles/hadoop_common/templates/yarn-site.xml.j2 index 01d96307ccb..ab4007619de 100644 --- a/playbooks/roles/hadoop_common/templates/yarn-site.xml.j2 +++ b/playbooks/roles/hadoop_common/templates/yarn-site.xml.j2 @@ -1,7 +1,7 @@ {% do YARN_SITE_DEFAULT_CONFIG.update(yarn_site_config) %} -{% for key, value in YARN_SITE_DEFAULT_CONFIG.iteritems() %} +{% for key, value in YARN_SITE_DEFAULT_CONFIG.items() %} {{ key }} {{ value }} diff --git a/playbooks/roles/hermes/defaults/main.yml b/playbooks/roles/hermes/defaults/main.yml new file mode 100644 index 00000000000..75161c46ac0 --- /dev/null +++ b/playbooks/roles/hermes/defaults/main.yml @@ -0,0 +1,87 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role hermes +# + +# +# vars are namespaced with the module name. 
+# + +hermes_user: hermes +hermes_role_name: hermes +hermes_service_name: hermes +hermes_source_repo: https://github.com/edx/hermes.git +HERMES_VERSION: master +hermes_version: "{{ HERMES_VERSION }}" +hermes_app_dir: "{{ COMMON_APP_DIR }}/{{ hermes_service_name }}" +hermes_download_dir: "{{ hermes_app_dir }}/downloads" +hermes_code_dir: "{{ hermes_app_dir }}/{{ hermes_service_name }}" +hermes_venv_dir: "{{ hermes_app_dir }}/venvs/{{ hermes_service_name }}" +hermes_venv_bin: "{{ hermes_venv_dir }}/bin" +hermes_manifest_yaml_file_path: "{{ COMMON_CFG_DIR }}/{{ hermes_service_name }}.yml" +hermes_private_key_file_path: "{{ hermes_app_dir }}/{{ hermes_service_name }}-private-key" +hermes_environment: + PATH: "{{ hermes_venv_dir }}/bin:{{ ansible_env.PATH }}" +# +# OS packages +# + +hermes_debian_pkgs: [] + +hermes_redhat_pkgs: [] + +# the name of the yaml file on disk that the application is looking for without the suffix. +# this is usually the play used to configure the service, but not always. +HERMES_TARGET_SERVICE: "Not configured" + +# HERMES_INTERVAL is how often to check S3 for updates +HERMES_INTERVAL: 60 + +# HERMES_JITTER is how much jitter to add to HERMES_INTERVAL. 
+# Each time hermes sleeps between check is a random number between HERMES_INTERVAL and HERMES_INTERVAL+HERMES_JITTER +HERMES_JITTER: 40 + +# Enable pre_hermes_checks.sh in systemd service file +HERMES_ENABLE_PRE_CHECK_SCRIPT: False + +# Where to download config file from, start with s3:// or https:// +HERMES_REMOTE_FILE_LOCATION: "Not configured" + +# Where to download the file from eg: s3://some-bucket/{{ HERMES_TARGET_SERVICE }}.yml +HERMES_REMOTE_FILE_PATH: "{{ HERMES_REMOTE_FILE_LOCATION }}/{{ COMMON_ENVIRONMENT }}/{{ HERMES_TARGET_SERVICE }}.yml" + +# Where to download the file to +HERMES_LOCAL_FILE_PATH: "{{ hermes_download_dir }}/{{ HERMES_TARGET_SERVICE }}.yml" + +# How to copy the downloaded file to the config path the application expects +HERMES_COPY_COMMAND: /bin/cp {{ hermes_download_dir }}/{{ HERMES_TARGET_SERVICE }}.yml {{ COMMON_CFG_DIR }}/{{ HERMES_TARGET_SERVICE }}.yml + +# How to restart the application +HERMES_RELOAD_COMMAND: /edx/bin/supervisorctl signal HUP all + +# Hermes typically reloads one config file and reloads all the services on that box, typically 1 service for us +# if you need to do something more complex like reloading multiple services, running additional commands etc +# you could overwrite HERMES_SERVICE_CONFIG directly and ignore these defaults +HERMES_SERVICE_CONFIG: + - url: '{{ HERMES_REMOTE_FILE_PATH }}' + filename: '{{ HERMES_LOCAL_FILE_PATH }}' + command: 'sudo {{ HERMES_COPY_COMMAND }} && sudo {{ HERMES_RELOAD_COMMAND }}' + secret_key_files: "{{ HERMES_PRIVATE_KEYS_DICT | map('regex_replace','(.*)','/edx/app/hermes/hermes-\\1') | join(',') if HERMES_PRIVATE_KEYS_DICT is defined else None }}" + +# These are dropped into sudoers for the user that runs this program, care should be taken to ensure they are safe +# to run. By default we assume the 1 service per box and restart supervisor model. 
If you did something custom with +# HERMES_SERVICE_CONFIG you may need to make adjustments here as well to give the application user permission to perform the actions +# in its service config +HERMES_ALLOWED_SUDO_COMMANDS: + - "{{ HERMES_COPY_COMMAND }}" + - "{{ HERMES_RELOAD_COMMAND }}" + +HERMES_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-{{ hermes_service_name }}" diff --git a/playbooks/roles/hermes/files/pre_hermes_checks.sh b/playbooks/roles/hermes/files/pre_hermes_checks.sh new file mode 100644 index 00000000000..9dfdce8c3cb --- /dev/null +++ b/playbooks/roles/hermes/files/pre_hermes_checks.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# This is only relevant for AWS instances, and shouldn't be added or run otherwise. +# This script exists because when we build amis we take a snapshot and when we take this snapshot +# the best practice is to reboot the instance since if you do not do this reboot the instance's +# file system integrity cannot be guaranteed. +# Since we monitor hermes, this causes errors that are not a problem to be logged when hermes fails to run correctly +# on build boxes. +# This script is run before hermes is started, preventing it from booting during builds. + +# Default startup timeout in systemd is 60 seconds, sleep 50 means we should return before the timeout +sleep_time=50 + +# This is a hack to sleep and then return 1 if on a build box +# The sleep slows down the looping caused by systemd trying to start the service again if it failed. +# Just returning 1 causes tons of "Unit entered failed state" messages. This will reduce them to 1 a minute or so. 
+if aws sts get-caller-identity --output=text --query 'Arn' | grep -q 'gocd'; then + echo "Detected build server, sleeping ${sleep_time} seconds to reduce log noise" + sleep $sleep_time + exit 1 +fi diff --git a/playbooks/roles/hermes/meta/main.yml b/playbooks/roles/hermes/meta/main.yml new file mode 100644 index 00000000000..78128d26e16 --- /dev/null +++ b/playbooks/roles/hermes/meta/main.yml @@ -0,0 +1,29 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role hermes +# +# Example: +# +# dependencies: +# - { +# role: my_role +# my_role_var0: "foo" +# my_role_var1: "bar" +# } +dependencies: + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ hermes_service_name }}" + edx_service_with_rendered_config_service_config: "{{ HERMES_SERVICE_CONFIG }}" + edx_service_with_rendered_config_user: "{{ hermes_user }}" + edx_service_with_rendered_config_home: "{{ hermes_app_dir }}" + edx_service_with_rendered_config_packages: + debian: "{{ hermes_debian_pkgs }}" + redhat: "{{ hermes_redhat_pkgs }}" diff --git a/playbooks/roles/hermes/tasks/main.yml b/playbooks/roles/hermes/tasks/main.yml new file mode 100644 index 00000000000..a803e63c67d --- /dev/null +++ b/playbooks/roles/hermes/tasks/main.yml @@ -0,0 +1,202 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role hermes +# +# Overview: +# +# Dependencies: +# +# +# Example play: +# +# + +# The deadsnakes PPA is required to install 
python3.6 on Xenial. +# Bionic comes with python3.6 installed. +- name: add deadsnakes repository + apt_repository: + repo: "ppa:fkrull/deadsnakes" + when: ansible_distribution_release == 'xenial' + tags: + - install + - install:system-requirements + +- name: install python3.6 + apt: + name: "{{ item }}" + with_items: + - python3.6 + - python3-pip + when: ansible_distribution_release == 'xenial' + tags: + - install + - install:system-requirements + +- name: build virtualenv with python3 + command: "virtualenv --python=/usr/bin/python3.6 {{ hermes_venv_dir }}" + args: + creates: "{{ hermes_venv_dir }}/bin/pip" + become_user: "{{ hermes_user }}" + tags: + - install + - install:system-requirements + +- name: git checkout hermes repo into {{ hermes_code_dir }} + git: + dest: "{{ hermes_code_dir }}" + repo: "{{ hermes_source_repo }}" + version: "{{ hermes_version }}" + accept_hostkey: yes + become_user: "{{ hermes_user }}" + register: hermes_checkout + tags: + - install + - install:code + +- name: run make requirements + command: make requirements + args: + chdir: "{{ hermes_code_dir }}" + become_user: "{{ hermes_user }}" + environment: "{{ hermes_environment }}" + tags: + - install + - install:app-requirements + +- name: write out the service wrapper + template: + src: "templates/hermes.sh.j2" + dest: "{{ hermes_app_dir }}/{{ hermes_service_name }}.sh" + mode: 0700 + owner: "{{ hermes_user }}" + group: "{{ hermes_user }}" + tags: + - install + - install:configuration + +- name: setup the app env file + template: + src: "templates/hermes_env.j2" + dest: "{{ hermes_app_dir }}/{{ hermes_service_name }}_env" + owner: "{{ hermes_user }}" + group: "{{ hermes_user }}" + mode: 0644 + tags: + - install + - install:configuration + +- name: Create download directory + file: + path: "{{ item }}" + state: directory + owner: "{{ hermes_user }}" + group: "{{ common_web_group }}" + mode: "0770" + with_items: + - "{{ hermes_download_dir }}" + tags: + - install + - install:base + +- 
name: install private key + copy: + content: "{{ item.value }}" + dest: "{{ hermes_app_dir }}/{{ hermes_service_name }}-{{ item.key }}" + force: yes + owner: "{{ hermes_user }}" + mode: "0600" + no_log: True + with_dict: "{{ HERMES_PRIVATE_KEYS_DICT }}" + when: HERMES_PRIVATE_KEYS_DICT is defined + tags: + - install + - install:base + +- name: Add sudoers entry + template: + src: "templates/sudoers.j2" + dest: "/etc/sudoers.d/hermes" + owner: "root" + group: "root" + mode: 0440 + tags: + - install + - install:base + +- name: create symlinks from the venv bin dir + file: + src: "{{ hermes_venv_dir }}/bin/{{ item }}" + dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ hermes_service_name }}" + state: link + with_items: + - python + - pip + tags: + - install + - install:app-requirements + + +- name: Create hermes systemd job + template: + src: "hermes-systemd.service.j2" + dest: "/etc/systemd/system/{{ hermes_service_name }}.service" + owner: root + group: root + when: ansible_distribution_release == 'xenial' or ansible_distribution_release == 'bionic' + tags: + - install + - install:base + + # This command and the subsequent check in the when condition are related + # to this bug: https://github.com/ansible/ansible-modules-core/issues/593 +- name: Are we in a Docker container + shell: echo $(egrep -q 'docker' /proc/self/cgroup && echo 'yes' || echo 'no') + ignore_errors: yes + register: docker_container + tags: + - install + - install:base + +- name: Enable hermes to start on boot + service: + name: "{{ hermes_service_name }}.service" + enabled: yes + when: (ansible_distribution_release == 'xenial' or ansible_distribution_release == 'bionic') and docker_container.stdout != 'yes' + tags: + - install + - install:base + +- name: Write the pre_hermes python script + copy: + src: pre_hermes_checks.sh + dest: "{{ hermes_app_dir }}/pre_hermes_checks.sh" + owner: "{{ hermes_user }}" + group: "{{ hermes_user }}" + mode: "0750" + when: HERMES_ENABLE_PRE_CHECK_SCRIPT + +- 
name: reload systemd configuration + command: systemctl daemon-reload + when: not disable_edx_services + tags: + - install + - install:configuration + +- name: Start hermes + service: + name: "{{ hermes_service_name }}" + state: started + register: start_hermes + when: not disable_edx_services + tags: + - manage + - manage:start diff --git a/playbooks/roles/hermes/templates/hermes-systemd.service.j2 b/playbooks/roles/hermes/templates/hermes-systemd.service.j2 new file mode 100644 index 00000000000..f9c2b7eb316 --- /dev/null +++ b/playbooks/roles/hermes/templates/hermes-systemd.service.j2 @@ -0,0 +1,23 @@ +[Unit] +Description=Hermes - Hermes is the messenger/bureaucrat of the edx stack. It fetches documents and files them on a server's filesystem on a regular basis +Documentation=https://github.com/edx/hermes/blob/master/README.md +After=network.target + +[Service] +User={{ hermes_user }} + +Type=simple +TimeoutSec=60 + +Restart=always +RestartSec=1 + +WorkingDirectory={{ hermes_code_dir }} +ExecStart={{ hermes_app_dir }}/{{ hermes_service_name }}.sh -i {{ HERMES_INTERVAL }} -j {{ HERMES_JITTER }} -y {{ hermes_manifest_yaml_file_path }} + +{% if HERMES_ENABLE_PRE_CHECK_SCRIPT %} +ExecStartPre={{ hermes_app_dir }}/pre_hermes_checks.sh +{%- endif %} + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/hermes/templates/hermes.sh.j2 b/playbooks/roles/hermes/templates/hermes.sh.j2 new file mode 100644 index 00000000000..f2171cc8e49 --- /dev/null +++ b/playbooks/roles/hermes/templates/hermes.sh.j2 @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# {{ ansible_managed }} + +source {{ hermes_app_dir }}/{{ hermes_service_name }}_env +{% if COMMON_ENABLE_NEWRELIC_APP %} +{% set executable = hermes_venv_bin + '/newrelic-admin run-program ' + hermes_venv_bin + '/python' %} +{% else %} +{% set executable = hermes_venv_bin + '/python' %} +{% endif %} + +{% if COMMON_ENABLE_NEWRELIC_APP %} +export NEW_RELIC_APP_NAME="{{ hermes_service_name }}" +if command -v ec2metadata 
>/dev/null 2>&1; then + INSTANCEID=$(ec2metadata --instance-id); + HOSTNAME=$(hostname) + export NEW_RELIC_PROCESS_HOST_DISPLAY_NAME="$HOSTNAME-$INSTANCEID" +fi +export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" +{% endif -%} + +# We exec so that hermes is the child of systemd and can be managed properly +exec {{ executable }} {{ hermes_code_dir }}/hermes.py $@ diff --git a/playbooks/roles/hermes/templates/hermes_env.j2 b/playbooks/roles/hermes/templates/hermes_env.j2 new file mode 100644 index 00000000000..ff4b674c823 --- /dev/null +++ b/playbooks/roles/hermes/templates/hermes_env.j2 @@ -0,0 +1,6 @@ +# {{ ansible_managed }} +{% for name,value in hermes_environment.items() %} +{%- if value %} +export {{ name }}="{{ value }}" +{%- endif %} +{% endfor %} diff --git a/playbooks/roles/hermes/templates/sudoers.j2 b/playbooks/roles/hermes/templates/sudoers.j2 new file mode 100644 index 00000000000..892c5e76d68 --- /dev/null +++ b/playbooks/roles/hermes/templates/sudoers.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +{% for line in HERMES_ALLOWED_SUDO_COMMANDS %} +{{ hermes_user }} ALL=(root) NOPASSWD: {{ line }} +{% endfor %} diff --git a/playbooks/roles/hive/templates/hive-site.xml.j2 b/playbooks/roles/hive/templates/hive-site.xml.j2 index 7ab279a6969..26d5b9ed9a9 100644 --- a/playbooks/roles/hive/templates/hive-site.xml.j2 +++ b/playbooks/roles/hive/templates/hive-site.xml.j2 @@ -2,7 +2,7 @@ -{% for key, value in HIVE_SITE_DEFAULT_CONFIG.iteritems() %} +{% for key, value in HIVE_SITE_DEFAULT_CONFIG.items() %} {{ key }} {{ value }} diff --git a/playbooks/roles/hotg/defaults/main.yml b/playbooks/roles/hotg/defaults/main.yml new file mode 100644 index 00000000000..56ecbbc5c6b --- /dev/null +++ b/playbooks/roles/hotg/defaults/main.yml @@ -0,0 +1,116 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://github.com/edx/configuration/wiki +# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions 
+# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +# +# Defaults for role hotg +# +hotg_role_name: "hotg" +HOTG_SERVICE_NAME: "hotg" +HOTG_USER: "{{ HOTG_SERVICE_NAME }}" + +# AWS Account details +HOTG_ACCOUNT_ID: !!null +HOTG_ACCOUNT_NAME: "my aws account" +HOTG_AWS_ACCESS_ID: !!null +HOTG_AWS_SECRET_KEY: !!null + +HOTG_GITHUB_OAUTH_KEY: "replace with your key" +HOTG_GITHUB_OAUTH_SECRET: "replace with your secret" +hotg_github_oauth_scope: "user:email,read:org" + +HOTG_GITHUB_EMAIL_REGEX: "/@example.com$/" +HOTG_GITHUB_TEAM_REGEX: "/^your-github-group$/" + +HOTG_GITHUB_ORG: "edx" +HOTG_GITHUB_ORG_ID: "111111" + +HOTG_APITOKEN_ENABLED: "true" +# A list to allow graceful retirement, first item used for +# new requests. +HOTG_APITOKEN_ENCRYPTION_KEYS: + - "CHANGEME" + +# Java tuning +HOTG_JAVA_MIN_HEAP: "2g" +HOTG_JAVA_MAX_HEAP: "2g" +HOTG_JAVA_MAX_PERM: "128m" + +# The build of our Asgard fork to deploy +HOTG_GIT_REVISION: "b813d0612c9da8b2a38c6d12c8d9020554528436" + +## Authentication configuration +HOTG_PORT: 8090 +HOTG_URL: "http://localhost:{{ HOTG_PORT }}" +HOTG_CALLBACK_URI: "{{ HOTG_URL }}/auth/signIn" +HOTG_SUCCESS_URI: "{{ HOTG_URL }}" +HOTG_AUTHENTICATION_PROVIDER: "githubOauthAuthenticationProvider" + +# Instance types configuration, e.g. +# +# HOTG_ADDITIONAL_INSTANCE_TYPES: +# t3.xlarge: # Required, AWS instance type +# price: 0.052 # Required, must be a number +# # Remaining fields are optional strings, e.g. 
+# family: 'Burstable' +# group: 't3' +# vCpu: '4' +# mem: '16.00' +HOTG_ADDITIONAL_INSTANCE_TYPES: {} + +# Email configuration +HOTG_EMAIL_FROM_ADDRESS: "asgard@example.com" +HOTG_EMAIL_SYSTEM_FROM_ADDRESS: "asgard@example.com" +HOTG_SMTP_HOST: "localhost" +HOTG_SMTP_PORT: 25 + +# General configuration +HOTG_AWS_REGIONS: + - "us-east-1" + - "us-west-1" + - "us-west-2" + - "eu-west-1" + - "sa-east-1" + - "ap-northeast-1" + - "ap-southeast-1" + - "ap-southeast-2" + +HOTG_MAX_GROUPS: 6 + +# +# vars are namespaced with the module name. +# + +HOTG_TOMCAT_VERSION: "7.0.54" +hotg_tomcat_package: "https://s3.amazonaws.com/edx-static/tomcat/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}.tar.gz" +HOTG_TOMCAT_SHUTDOWN_PORT: 8005 +HOTG_TOMCAT_CONNECTOR_PORT: 8090 +HOTG_TOMCAT_REDIRECT_PORT: 8443 +HOTG_TOMCAT_AJP_PORT: 8009 +HOTG_TOMCAT_HOME: "{{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}" + +# For managing ssl termination via ELB or NGINX. +HOTG_SSL_TERMINATED_ELSEWHERE: false +HOTG_TOMCAT_SSL_PORT: 443 +HOTG_TOMCAT_PROXY_NAME: "hotg.example.com" +HOTG_NGINX_PORT: 80 + +hotg_app_dir: "{{ COMMON_APP_DIR }}/{{ HOTG_SERVICE_NAME }}" +hotg_home: "{{ COMMON_APP_DIR }}/{{ HOTG_SERVICE_NAME }}" +hotg_data_dir: "{{ hotg_home }}/data" +hotg_log_dir: "{{ COMMON_LOG_DIR }}/{{ HOTG_SERVICE_NAME }}" + +# +# OS packages +# + +hotg_debian_pkgs: [] + +hotg_pip_pkgs: [] + +hotg_redhat_pkgs: [] diff --git a/playbooks/roles/hotg/meta/main.yml b/playbooks/roles/hotg/meta/main.yml new file mode 100644 index 00000000000..8960d457f41 --- /dev/null +++ b/playbooks/roles/hotg/meta/main.yml @@ -0,0 +1,24 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://github.com/edx/configuration/wiki +# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role hotg +# +# Example: +# +# dependencies: +# - { +# role: my_role +# 
my_role_var0: "foo" +# my_role_var1: "bar" +# } + +dependencies: + - role: oraclejdk + - supervisor diff --git a/playbooks/roles/hotg/tasks/deploy.yml b/playbooks/roles/hotg/tasks/deploy.yml new file mode 100644 index 00000000000..f97daea5248 --- /dev/null +++ b/playbooks/roles/hotg/tasks/deploy.yml @@ -0,0 +1,98 @@ +--- + +- name: create supervisor wrapper + template: > + src=edx/app/hotg/supervisor_wrapper.sh.j2 + dest={{ hotg_app_dir }}/supervisor_wrapper.sh + owner={{ HOTG_USER }} group={{ common_web_user }} mode=0750 + tags: + - install + - install:base + +- name: create supervisor script + template: > + src=edx/app/supervisor/conf.d/hotg.conf.j2 + dest={{ supervisor_available_dir }}/{{ HOTG_SERVICE_NAME }}.conf + owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 + tags: + - install + - install:base + +- name: enable supervisor scripts + file: > + src={{ supervisor_available_dir }}/{{ HOTG_SERVICE_NAME }}.conf + dest={{ supervisor_cfg_dir }}/{{ HOTG_SERVICE_NAME }}.conf + owner={{ supervisor_user }} group={{ common_web_user }} + mode=0644 state=link force=yes + when: not disable_edx_services + tags: + - manage + - manage:update + +- name: update supervisor configuration + shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" + when: not disable_edx_services + tags: + - manage + - manage:update + +- name: stop the service + supervisorctl: > + state=stopped + supervisorctl_path={{ supervisor_ctl }} + config={{ supervisor_cfg }} + name="{{ HOTG_SERVICE_NAME }}" + sudo_user: "{{ supervisor_service_user }}" + tags: + - manage + - manage:stop + +- name: create hotg application config + template: > + src=edx/app/hotg/Config.groovy.j2 + dest={{ hotg_app_dir }}/Config.groovy + mode=0644 + sudo_user: "{{ HOTG_USER }}" + tags: + - install + - install:configuration + + # + # Workaround a bug in ansible where it fails if it cannot verify + # a files md5sum, which it cannot do for large files +- name: remove old war + file: > + path={{ hotg_app_dir 
}}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}/webapps/ROOT.war + state=absent + tags: + - install + - install:base + + +- name: grab the war file from s3 + get_url: + url: "https://files.edx.org/devops/jenkins_assets/asgard.war.{{ HOTG_GIT_REVISION }}/asgard.war" + dest: "{{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}/webapps/ROOT.war" + force: yes + tags: + - install + - install:base + +- name: remove exploded war directory + file: > + path={{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}/webapps/ROOT + state=absent + tags: + - install + - install:base + +- name: start the service + supervisorctl: > + state=started + supervisorctl_path={{ supervisor_ctl }} + config={{ supervisor_cfg }} + name="{{ HOTG_SERVICE_NAME }}" + sudo_user: "{{ supervisor_service_user }}" + tags: + - manage + - manage:start diff --git a/playbooks/roles/hotg/tasks/main.yml b/playbooks/roles/hotg/tasks/main.yml new file mode 100644 index 00000000000..cdc8864947a --- /dev/null +++ b/playbooks/roles/hotg/tasks/main.yml @@ -0,0 +1,141 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://github.com/edx/configuration/wiki +# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +# +# +# Tasks for role hotg +# +# Overview: +# +# Deploys an edX forked version of Netflix's Asgard +# under the name Hammer of the gods. 
+# +# Dependencies: +# +# Example play: +# +# - name: Configure instance(s) +# hosts: hotg +# sudo: True +# vars_files: +# - "{{ secure_dir }}/vars/common/common.yml" +# - "{{ secure_dir }}/vars/users.yml" +# - "{{ secure_dir }}/vars/env/hotg.yml" +# gather_facts: True +# roles: +# - common +# - gh_users +# - oraclejdk +# - splunkforwarder +# - hotg +# + +- name: create application user + user: > + name="{{ HOTG_SERVICE_NAME }}" + home="{{ COMMON_APP_DIR }}/{{ HOTG_SERVICE_NAME }}" + createhome=no + shell=/bin/false + tags: + - install + - install:base + +- name: create common directories + file: > + path="{{ item }}" + state=directory + owner="{{ HOTG_SERVICE_NAME }}" + group="{{ common_web_group }}" + with_items: + - "{{ COMMON_APP_DIR }}/{{ HOTG_SERVICE_NAME }}" + - "{{ COMMON_APP_DIR }}/{{ HOTG_SERVICE_NAME }}/data" + tags: + - install + - install:base + +- name: create directories owned by www-data + file: > + path="{{ item }}" + state=directory + owner="{{ common_web_group }}" + group="{{ common_web_group }}" + with_items: + - "{{ COMMON_LOG_DIR }}/{{ HOTG_SERVICE_NAME }}" + tags: + - install + - install:base + +- name: install a bunch of system packages on which hotg relies + apt: pkg={{ item }} state=present + with_items: "{{ hotg_debian_pkgs }}" + when: ansible_distribution in common_debian_variants + tags: + - install + - install:base + +- name: install a bunch of system packages on which hotgs relies + yum: pkg={{ item }} state=present + with_items: hotg_redhat_pkgs + when: ansible_distribution in common_redhat_variants + tags: + - install + - install:base + +# +# Install tomcat +# +- name: download the tomcat archive + get_url: > + dest="/tmp/{{ hotg_tomcat_package|basename }}" + url="{{ hotg_tomcat_package }}" + register: download_tomcat + tags: + - install + - install:base + +- name: explode the archive + shell: > + tar xf /tmp/{{ hotg_tomcat_package|basename }} + creates={{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }} + chdir={{ 
hotg_app_dir }} + tags: + - install + - install:base + +- name: chown of the tomcat dir + command: chown -R {{ common_web_user }} "{{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}" + tags: + - install + - install:base + +- name: create hotg tomcat server.xml config + template: > + src=edx/app/hotg/server.xml.j2 + dest={{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}/conf/server.xml + mode=0644 + owner="root" + group="{{ HOTG_USER }}" + tags: + - install + - install:configuration + +- name: remove unneeded webapps + file: > + path={{ hotg_app_dir }}/apache-tomcat-{{ HOTG_TOMCAT_VERSION }}/webapps/{{ item }} + state=absent + with_items: + - docs + - examples + - host-manager + - manager + tags: + - install + - install:base + +- include: deploy.yml tags=deploy diff --git a/playbooks/roles/hotg/templates/edx/app/hotg/Config.groovy.j2 b/playbooks/roles/hotg/templates/edx/app/hotg/Config.groovy.j2 new file mode 100644 index 00000000000..118608c2c78 --- /dev/null +++ b/playbooks/roles/hotg/templates/edx/app/hotg/Config.groovy.j2 @@ -0,0 +1,403 @@ +import com.netflix.asgard.model.HardwareProfile +import com.netflix.asgard.model.InstanceTypeData + +grails { + awsAccounts=['{{ HOTG_ACCOUNT_ID }}'] + awsAccountNames=['{{ HOTG_ACCOUNT_ID }}':'{{ HOTG_ACCOUNT_NAME }}'] + serverUrl='{{ HOTG_URL }}' +} + +secret { + accessId='{{ HOTG_AWS_ACCESS_ID }}' + secretKey='{{ HOTG_AWS_SECRET_KEY }}' +} + +cloud { + accountName='{{ HOTG_ACCOUNT_NAME }}' + publicResourceAccounts=['amazon'] + customInstanceTypes = [ + new InstanceTypeData(linuxOnDemandPrice: 0.085, hardwareProfile: + new HardwareProfile(instanceType: 'c5.large', + family: 'Compute Optimized', group: 'c5', + size: 'Large', arch: '64-bit', vCpu: '2', ecu: '8', + mem: '4.0', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 0.170, hardwareProfile: + new HardwareProfile(instanceType: 'c5.xlarge', + family: 'Compute Optimized', group: 'c5', + size: 
'Extra Large', arch: '64-bit', vCpu: '4', ecu: '16', + mem: '8.0', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 0.340, hardwareProfile: + new HardwareProfile(instanceType: 'c5.2xlarge', + family: 'Compute Optimized', group: 'c5', + size: 'Double Extra Large', arch: '64-bit', vCpu: '8', ecu: '31', + mem: '16.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 0.680, hardwareProfile: + new HardwareProfile(instanceType: 'c5.4xlarge', + family: 'Compute Optimized', group: 'c5', + size: 'Quadruple Extra Large', arch: '64-bit', vCpu: '16', ecu: '62', + mem: '32.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 1.530, hardwareProfile: + new HardwareProfile(instanceType: 'c5.9xlarge', + family: 'Compute Optimized', group: 'c5', + size: 'Nine Extra Large', arch: '64-bit', vCpu: '36', ecu: '132', + mem: '72.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '10 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 3.060, hardwareProfile: + new HardwareProfile(instanceType: 'c5.18xlarge', + family: 'Compute Optimized', group: 'c5', + size: '18 Extra Large', arch: '64-bit', vCpu: '72', ecu: '264', + mem: '144.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '25 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.126, hardwareProfile: + new HardwareProfile(instanceType: 'r5.large', + family: 'Memory Optimized', group: 'r5', + size: 'Large', arch: '64-bit', vCpu: '2', ecu: '8', + mem: '16.0', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 0.252, hardwareProfile: + new HardwareProfile(instanceType: 'r5.xlarge', + family: 'Memory Optimized', group: 'r5', + size: 'Extra Large', arch: '64-bit', vCpu: '4', ecu: '16', + mem: '32.0', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new 
InstanceTypeData(linuxOnDemandPrice: 0.504, hardwareProfile: + new HardwareProfile(instanceType: 'r5.2xlarge', + family: 'Memory Optimized', group: 'r5', + size: 'Double Extra Large', arch: '64-bit', vCpu: '8', ecu: '31', + mem: '64.0', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 1.008, hardwareProfile: + new HardwareProfile(instanceType: 'r5.4xlarge', + family: 'Memory Optimized', group: 'r5', + size: 'Quadruple Extra Large', arch: '64-bit', vCpu: '16', ecu: '62', + mem: '128.0', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Up to 10 Gbps')), + new InstanceTypeData(linuxOnDemandPrice: 3.024, hardwareProfile: + new HardwareProfile(instanceType: 'r5.12xlarge', + family: 'Memory Optimized', group: 'r5', + size: 'Eight Extra Large', arch: '64-bit', vCpu: '36', ecu: '132', + mem: '384.0', storage: 'EBS only', ebsOptim: '-', + netPerf: '10 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 6.048, hardwareProfile: + new HardwareProfile(instanceType: 'r5.24xlarge', + family: 'Memory Optimized', group: 'r5', + size: 'Eight Extra Large', arch: '64-bit', vCpu: '72', ecu: '264', + mem: '768.0', storage: 'EBS only', ebsOptim: '-', + netPerf: '25 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.096, hardwareProfile: + new HardwareProfile(instanceType: 'm5.large', + family: 'General Purpose', group: 'm5', + size: 'Large', arch: '64-bit', vCpu: '2', ecu: '6.5', + mem: '8.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Moderate')), + new InstanceTypeData(linuxOnDemandPrice: 0.192, hardwareProfile: + new HardwareProfile(instanceType: 'm5.xlarge', + family: 'General Purpose', group: 'm5', + size: 'Extra Large', arch: '64-bit', vCpu: '4', ecu: '13', + mem: '16.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 0.384, hardwareProfile: + new HardwareProfile(instanceType: 'm5.2xlarge', + family: 'General Purpose', group: 'm5', + size: 'Double Extra 
Large', arch: '64-bit', vCpu: '8', ecu: '26', + mem: '32.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 0.768, hardwareProfile: + new HardwareProfile(instanceType: 'm5.4xlarge', + family: 'General Purpose', group: 'm5', + size: 'Quadruple Extra Large', arch: '64-bit', vCpu: '16', ecu: '53.5', + mem: '64.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 2.304, hardwareProfile: + new HardwareProfile(instanceType: 'm5.12xlarge', + family: 'General Purpose', group: 'm5', + size: '12 Extra Large', arch: '64-bit', vCpu: '48', ecu: '124.5', + mem: '192.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '10 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 4.608, hardwareProfile: + new HardwareProfile(instanceType: 'm5.24xlarge', + family: 'General Purpose', group: 'm5', + size: '24 Extra Large', arch: '64-bit', vCpu: '96', ecu: '124.5', + mem: '384.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '10 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.006, hardwareProfile: + new HardwareProfile(instanceType: 't2.nano', + family: 'Burstable', group: 't2', + size: 'Nano', arch: '64-bit', vCpu: '1', ecu: '1', + mem: '0.50', storage: 'EBS only', ebsOptim: '-', + netPerf: '?')), + new InstanceTypeData(linuxOnDemandPrice: 0.013, hardwareProfile: + new HardwareProfile(instanceType: 't2.micro', + family: 'Burstable', group: 't2', + size: 'Micro', arch: '64-bit', vCpu: '1', ecu: '1', + mem: '1.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '?')), + new InstanceTypeData(linuxOnDemandPrice: 0.026, hardwareProfile: + new HardwareProfile(instanceType: 't2.small', + family: 'Burstable', group: 't2', + size: 'Small', arch: '64-bit', vCpu: '1', ecu: '1', + mem: '2.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '?')), + new InstanceTypeData(linuxOnDemandPrice: 0.052, hardwareProfile: + new HardwareProfile(instanceType: 't2.medium', + family: 'Burstable', 
group: 't2', + size: 'Medium', arch: '64-bit', vCpu: '2', ecu: '3', + mem: '4.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '?')), + new InstanceTypeData(linuxOnDemandPrice: 0.052, hardwareProfile: + new HardwareProfile(instanceType: 't2.large', + family: 'Burstable', group: 't2', + size: 'Large', arch: '64-bit', vCpu: '2', ecu: '6', + mem: '4.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '?')), + new InstanceTypeData(linuxOnDemandPrice: 0.120, hardwareProfile: + new HardwareProfile(instanceType: 'm4.large', + family: 'General Purpose', group: 'm4', + size: 'Large', arch: '64-bit', vCpu: '2', ecu: '6.5', + mem: '8.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Moderate')), + new InstanceTypeData(linuxOnDemandPrice: 0.239, hardwareProfile: + new HardwareProfile(instanceType: 'm4.xlarge', + family: 'General Purpose', group: 'm4', + size: 'Extra Large', arch: '64-bit', vCpu: '4', ecu: '13', + mem: '16.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 0.479, hardwareProfile: + new HardwareProfile(instanceType: 'm4.2xlarge', + family: 'General Purpose', group: 'm4', + size: 'Double Extra Large', arch: '64-bit', vCpu: '8', ecu: '26', + mem: '32.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 0.958, hardwareProfile: + new HardwareProfile(instanceType: 'm4.4xlarge', + family: 'General Purpose', group: 'm4', + size: 'Quadruple Extra Large', arch: '64-bit', vCpu: '16', ecu: '53.5', + mem: '64.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 2.394, hardwareProfile: + new HardwareProfile(instanceType: 'm4.10xlarge', + family: 'General Purpose', group: 'm4', + size: 'Deca Extra Large', arch: '64-bit', vCpu: '40', ecu: '124.5', + mem: '160.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '10 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.105, hardwareProfile: + new 
HardwareProfile(instanceType: 'c4.large', + family: 'Compute Optimized', group: 'c4', + size: 'Large', arch: '64-bit', vCpu: '2', ecu: '8', + mem: '3.75', storage: 'EBS only', ebsOptim: '-', + netPerf: 'Moderate')), + new InstanceTypeData(linuxOnDemandPrice: 0.209, hardwareProfile: + new HardwareProfile(instanceType: 'c4.xlarge', + family: 'Compute Optimized', group: 'c4', + size: 'Extra Large', arch: '64-bit', vCpu: '4', ecu: '16', + mem: '7.5', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 0.419, hardwareProfile: + new HardwareProfile(instanceType: 'c4.2xlarge', + family: 'Compute Optimized', group: 'c4', + size: 'Double Extra Large', arch: '64-bit', vCpu: '8', ecu: '31', + mem: '15.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 0.838, hardwareProfile: + new HardwareProfile(instanceType: 'c4.4xlarge', + family: 'Compute Optimized', group: 'c4', + size: 'Quadruple Extra Large', arch: '64-bit', vCpu: '16', ecu: '62', + mem: '30.00', storage: 'EBS only', ebsOptim: '-', + netPerf: 'High')), + new InstanceTypeData(linuxOnDemandPrice: 1.675, hardwareProfile: + new HardwareProfile(instanceType: 'c4.8xlarge', + family: 'Compute Optimized', group: 'c4', + size: 'Eight Extra Large', arch: '64-bit', vCpu: '36', ecu: '132', + mem: '60.00', storage: 'EBS only', ebsOptim: '-', + netPerf: '10 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.004700, hardwareProfile: + new HardwareProfile(instanceType: 't3a.nano', + family: 'General Purpose', group: 't3a', + size: 't3a.nano', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '0.50', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.075200, hardwareProfile: + new HardwareProfile(instanceType: 't3a.micro', + family: 'General Purpose', group: 't3a', + size: 't3a.micro', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '1.00', storage: 'EBS only', ebsOptim: 
'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.018800, hardwareProfile: + new HardwareProfile(instanceType: 't3a.small', + family: 'General Purpose', group: 't3a', + size: 't3a.small', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '2.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.037600, hardwareProfile: + new HardwareProfile(instanceType: 't3a.medium', + family: 'General Purpose', group: 't3a', + size: 't3a.medium', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '4.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.075200, hardwareProfile: + new HardwareProfile(instanceType: 't3a.large', + family: 'General Purpose', group: 't3a', + size: 't3a.large', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '8.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.150400, hardwareProfile: + new HardwareProfile(instanceType: 't3a.xlarge', + family: 'General Purpose', group: 't3a', + size: 't3a.xlarge', arch: '64-bit', vCpu: '4', ecu: 'n/a', + mem: '16.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.300800, hardwareProfile: + new HardwareProfile(instanceType: 't3a.2xlarge', + family: 'General Purpose', group: 't3a', + size: 't3a.2xlarge', arch: '64-bit', vCpu: '8', ecu: 'n/a', + mem: '32.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.300800, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.large', + family: 'General Purpose', group: 't3a', + size: 'm5a.large', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '8.0', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.172000, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.xlarge', + 
family: 'General Purpose', group: 't3a', + size: 'm5a.xlarge', arch: '64-bit', vCpu: '4', ecu: 'n/a', + mem: '16.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.344000, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.2xlarge', + family: 'General Purpose', group: 't3a', + size: 'm5a.2xlarge', arch: '64-bit', vCpu: '8', ecu: 'n/a', + mem: '32.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.688000, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.4xlarge', + family: 'General Purpose', group: 't3a', + size: 'm5a.4xlarge', arch: '64-bit', vCpu: '16', ecu: 'n/a', + mem: '64.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 2.064000, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.12xlarge', + family: 'General Purpose', group: 't3a', + size: 'm5a.12xlarge', arch: '64-bit', vCpu: '48', ecu: 'n/a', + mem: '192.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 4.128000, hardwareProfile: + new HardwareProfile(instanceType: 'm5a.24xlarge', + family: 'General Purpose', group: 't3a', + size: 'm5a.24xlarge', arch: '64-bit', vCpu: '96', ecu: 'n/a', + mem: '384.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.103000, hardwareProfile: + new HardwareProfile(instanceType: 'm5ad.large', + family: 'General Purpose', group: 't3a', + size: 'm5ad.large', arch: '64-bit', vCpu: '2', ecu: 'n/a', + mem: '8.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.206000, hardwareProfile: + new HardwareProfile(instanceType: 'm5ad.xlarge', + family: 'General Purpose', group: 't3a', + size: 'm5ad.xlarge', arch: '64-bit', vCpu: '4', ecu: 'n/a', + mem: '16.00', storage: 'EBS 
only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.412000, hardwareProfile: + new HardwareProfile(instanceType: 'm5ad.2xlarge', + family: 'General Purpose', group: 't3a', + size: 'm5ad.2xlarge', arch: '64-bit', vCpu: '8', ecu: 'n/a', + mem: '32.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 0.824000, hardwareProfile: + new HardwareProfile(instanceType: 'm5ad.4xlarge', + family: 'General Purpose', group: 't3a', + size: 'm5ad.4xlarge', arch: '64-bit', vCpu: '16', ecu: 'n/a', + mem: '64.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 2.472000, hardwareProfile: + new HardwareProfile(instanceType: 'm5ad.12xlarge', + family: 'General Purpose', group: 't3a', + size: 'm5ad.12xlarge', arch: '64-bit', vCpu: '48', ecu: 'n/a', + mem: '192.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + new InstanceTypeData(linuxOnDemandPrice: 4.944000, hardwareProfile: + new HardwareProfile(instanceType: 'm5ad.24xlarge', + family: 'General Purpose', group: 't3a', + size: 'm5ad.24xlarge', arch: '64-bit', vCpu: '96', ecu: 'n/a', + mem: '384.00', storage: 'EBS only', ebsOptim: 'Y', + netPerf: 'Up to 5 Gigabit')), + {% for instance_type, profile in HOTG_ADDITIONAL_INSTANCE_TYPES.iteritems() %} + + new InstanceTypeData(linuxOnDemandPrice: {{ profile.price }}, hardwareProfile: + new HardwareProfile( + instanceType: '{{ instance_type }}', + family: '{{ profile.family | default("?") }}', + group: '{{ profile.group | default("?") }}', + size: '{{ instance_type }}', + arch: '{{ profile.arch | default("64-bit") }}', + vCpu: '{{ profile.vCpu | default("?") }}', + ecu: '{{ profile.ecu | default("?") }}', + mem: '{{ profile.mem | default("?") }}', + storage: '{{ profile.storage | default("EBS only") }}', + ebsOptim: '{{ profile.ebsOptim | default("?") }}', + netPerf: '{{ profile.netPerf | 
default("?") }}' + )), + {% endfor %} + ] +} + +cluster { + maxGroups = {{ HOTG_MAX_GROUPS }} +} + +security { + apiToken { + enabled = {{ HOTG_APITOKEN_ENABLED }} + encryptionKeys = [ '{{ HOTG_APITOKEN_ENCRYPTION_KEYS |join("\',\'") }}' ] + } +} + +plugin { + authenticationProvider = '{{ HOTG_AUTHENTICATION_PROVIDER }}' +} + +oauth { + providers { + github { + api = com.netflix.asgard.auth.GitHubApi + key = '{{ HOTG_GITHUB_OAUTH_KEY }}' + secret = '{{ HOTG_GITHUB_OAUTH_SECRET }}' + scope = '{{ hotg_github_oauth_scope }}' + callback = '{{ HOTG_CALLBACK_URI }}' + successUri = '{{ HOTG_SUCCESS_URI }}' + extraArgs { + emailRegex = {{ HOTG_GITHUB_EMAIL_REGEX }} + teamRegex = {{ HOTG_GITHUB_TEAM_REGEX }} + org = '{{ HOTG_GITHUB_ORG }}' + orgId = '{{ HOTG_GITHUB_ORG_ID }}' + } + } + } + } + +email { + userEnabled = true + systemEnabled = true + smtpHost = '{{ HOTG_SMTP_HOST }}' + smtpPort = {{ HOTG_SMTP_PORT }} + smtpUsername = '{{ HOTG_AWS_ACCESS_ID }}' + smtpPassword = '{{ HOTG_AWS_SECRET_KEY }}' + smtpSslEnabled = true + fromAddress = '{{ HOTG_EMAIL_FROM_ADDRESS }}' + systemEmailAddress = '{{ HOTG_EMAIL_FROM_ADDRESS }}' + errorSubjectStart = 'Hammer Error: ' +} + +{% if NEWRELIC_API_KEY is defined and NEWRELIC_ACCOUNT_ID is defined %} +newrelic { + apiKey = '{{ NEWRELIC_API_KEY }}' + accountId = '{{ NEWRELIC_ACCOUNT_ID }}' +} +{% endif %} diff --git a/playbooks/roles/hotg/templates/edx/app/hotg/server.xml.j2 b/playbooks/roles/hotg/templates/edx/app/hotg/server.xml.j2 new file mode 100644 index 00000000000..4940434a5d7 --- /dev/null +++ b/playbooks/roles/hotg/templates/edx/app/hotg/server.xml.j2 @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/playbooks/roles/hotg/templates/edx/app/hotg/supervisor_wrapper.sh.j2 b/playbooks/roles/hotg/templates/edx/app/hotg/supervisor_wrapper.sh.j2 new file mode 100644 index 00000000000..b94b82de49f --- /dev/null +++ b/playbooks/roles/hotg/templates/edx/app/hotg/supervisor_wrapper.sh.j2 @@ -0,0 
+1,29 @@ +#!/bin/bash +# Source: https://confluence.atlassian.com/plugins/viewsource/viewpagesrc.action?pageId=252348917 + +export CATALINA_HOME={{ HOTG_TOMCAT_HOME }} +export TOMCAT_HOME={{ HOTG_TOMCAT_HOME }} +export ASGARD_HOME={{ hotg_app_dir }} +export CATALINA_OUT={{ hotg_log_dir }}/catalina.out + +export CATALINA_OPTS="-Djava.awt.headless=true -Xms{{ HOTG_JAVA_MIN_HEAP }} -Xmx{{ HOTG_JAVA_MAX_HEAP }} -verbose:sizes -XX:MaxPermSize={{ HOTG_JAVA_MAX_PERM }} -XX:+HeapDumpOnOutOfMemoryError -XX:-UseGCOverheadLimit -XX:+ExplicitGCInvokesConcurrent -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -DonlyRegions={%- for region in HOTG_AWS_REGIONS -%}{{region}}{%- if not loop.last -%},{%- endif -%}{%- endfor -%}" +export CATALINA_PID=/tmp/$$ +export LD_LIBRARY_PATH=/usr/local/apr/lib + +function shutdown() +{ + date + echo "Shutting down Tomcat" + $TOMCAT_HOME/bin/catalina.sh stop -force +} + +date +echo "Starting Tomcat" + +. $TOMCAT_HOME/bin/catalina.sh start + +# Allow any signal which would kill a process to stop Tomcat +trap shutdown HUP INT QUIT ABRT KILL ALRM TERM TSTP + +echo "Waiting for `cat $CATALINA_PID`" +wait `cat $CATALINA_PID` diff --git a/playbooks/roles/hotg/templates/edx/app/supervisor/conf.d/hotg.conf.j2 b/playbooks/roles/hotg/templates/edx/app/supervisor/conf.d/hotg.conf.j2 new file mode 100644 index 00000000000..ea3a27339c3 --- /dev/null +++ b/playbooks/roles/hotg/templates/edx/app/supervisor/conf.d/hotg.conf.j2 @@ -0,0 +1,6 @@ +[program:{{ HOTG_SERVICE_NAME }}] +directory={{ hotg_app_dir }} +command={{ hotg_app_dir }}/supervisor_wrapper.sh +stdout_logfile=syslog +stderr_logfile=syslog +user={{ common_web_user }} \ No newline at end of file diff --git a/playbooks/roles/insights/defaults/main.yml b/playbooks/roles/insights/defaults/main.yml index 3dd8dd31aba..ff59b61821e 100644 --- a/playbooks/roles/insights/defaults/main.yml +++ b/playbooks/roles/insights/defaults/main.yml @@ -23,10 +23,8 @@ INSIGHTS_SUPPORT_EMAIL: '' 
INSIGHTS_CMS_COURSE_SHORTCUT_BASE_URL: '{{ INSIGHTS_LMS_BASE }}/course' INSIGHTS_OAUTH2_SECRET: 'secret' INSIGHTS_OAUTH2_URL_ROOT: '{{ INSIGHTS_LMS_BASE }}/oauth2' -INSIGHTS_OIDC_LOGOUT_URL: '{{ INSIGHTS_LMS_BASE }}/logout' INSIGHTS_OAUTH2_APP_CLIENT_NAME: insights INSIGHTS_OAUTH2_APP_USERNAME: staff -INSIGHTS_OAUTH2_APP_COMPLETE_URL: '{{ INSIGHTS_BASE_URL }}/complete/edx-oidc/' INSIGHTS_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false INSIGHTS_SECRET_KEY: 'YOUR_SECRET_KEY_HERE' INSIGHTS_OAUTH2_KEY: 'YOUR_OAUTH2_KEY' @@ -89,6 +87,12 @@ INSIGHTS_CORS_ORIGIN_WHITELIST_DEFAULT: - "{{ INSIGHTS_DOMAIN }}" INSIGHTS_CORS_ORIGIN_WHITELIST: "{{ INSIGHTS_CORS_ORIGIN_WHITELIST_DEFAULT + INSIGHTS_CORS_ORIGIN_WHITELIST_EXTRA }}" +# Remote config +INSIGHTS_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +INSIGHTS_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +INSIGHTS_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + # # This block of config is dropped into /edx/etc/insights.yml # and is read in by analytics_dashboard/settings/production.py @@ -104,13 +108,13 @@ INSIGHTS_CONFIG: DATA_API_URL: '{{ ANALYTICS_API_ENDPOINT }}' DATA_API_AUTH_TOKEN: '{{ INSIGHTS_DATA_API_AUTH_TOKEN }}' SOCIAL_AUTH_REDIRECT_IS_HTTPS: '{{ INSIGHTS_SOCIAL_AUTH_REDIRECT_IS_HTTPS }}' - SOCIAL_AUTH_EDX_OIDC_KEY: '{{ INSIGHTS_OAUTH2_KEY }}' - SOCIAL_AUTH_EDX_OIDC_SECRET: '{{ INSIGHTS_OAUTH2_SECRET }}' - SOCIAL_AUTH_EDX_OIDC_URL_ROOT: '{{ INSIGHTS_OAUTH2_URL_ROOT }}' - SOCIAL_AUTH_EDX_OIDC_ISSUER: '{{ INSIGHTS_OAUTH2_URL_ROOT }}' - SOCIAL_AUTH_EDX_OIDC_LOGOUT_URL: '{{ INSIGHTS_OIDC_LOGOUT_URL }}' - # This value should be the same as SOCIAL_AUTH_EDX_OIDC_SECRET - SOCIAL_AUTH_EDX_OIDC_ID_TOKEN_DECRYPTION_KEY: '{{ INSIGHTS_OAUTH2_SECRET }}' + + # Used to automatically configure OAuth2 Client + INSIGHTS_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'insights-sso-key' + INSIGHTS_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'insights-sso-secret' + INSIGHTS_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'insights-backend-service-key' + 
INSIGHTS_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'insights-backend-service-secret' + ENABLE_AUTO_AUTH: '{{ INSIGHTS_ENABLE_AUTO_AUTH }}' PLATFORM_NAME: '{{ INSIGHTS_PLATFORM_NAME }}' APPLICATION_NAME: '{{ INSIGHTS_APPLICATION_NAME }}' @@ -150,13 +154,14 @@ INSIGHTS_CONFIG: CSRF_COOKIE_SECURE: "{{ INSIGHTS_CSRF_COOKIE_SECURE }}" INSIGHTS_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-analytics-api" +INSIGHTS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false INSIGHTS_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}" INSIGHTS_NGINX_PORT: "18110" INSIGHTS_NGINX_SSL_PORT: "18113" INSIGHTS_GUNICORN_WORKERS: "2" INSIGHTS_GUNICORN_EXTRA: "" INSIGHTS_COURSE_API_URL: "{{ INSIGHTS_LMS_BASE }}/api/courses/v1/" -INSIGHTS_GRADING_POLICY_API_URL: "{{ INSIGHTS_LMS_BASE }}/api/grades/v0/" +INSIGHTS_GRADING_POLICY_API_URL: "{{ INSIGHTS_LMS_BASE }}/api/grades/v1/" INSIGHTS_MODULE_PREVIEW_URL: "{{ INSIGHTS_LMS_BASE }}/xblock" INSIGHTS_VERSION: "master" @@ -197,7 +202,8 @@ insights_nodeenv_dir: "{{ insights_home }}/nodeenvs/{{ insights_service_name }}" insights_nodeenv_bin: "{{ insights_nodeenv_dir }}/bin" insights_node_modules_dir: "{{ insights_code_dir }}/node_modules" insights_node_bin: "{{ insights_node_modules_dir }}/.bin" -insights_node_version: "{{ common_node_version }}" +INSIGHTS_NODE_VERSION: "12.11.1" +insights_node_version: "{{ INSIGHTS_NODE_VERSION }}" insights_gunicorn_host: "127.0.0.1" insights_gunicorn_port: "8110" @@ -216,9 +222,12 @@ insights_requirements: # OS packages # insights_debian_pkgs: - - 'libmysqlclient-dev' - - 'build-essential' + - libmysqlclient-dev + - libssl-dev # needed for mysqlclient python library + - build-essential - gettext + - python3-pip + - python3-dev insights_release_specific_debian_pkgs: xenial: diff --git a/playbooks/roles/insights/meta/main.yml b/playbooks/roles/insights/meta/main.yml index 8490d78b602..9e32b858f79 100644 --- a/playbooks/roles/insights/meta/main.yml +++ b/playbooks/roles/insights/meta/main.yml @@ -12,7 +12,13 @@ 
# dependencies: - common - - supervisor + - role: supervisor + supervisor_spec: + - service: "{{ insights_service_name }}" + migration_check_services: "{{ insights_service_name }}" + python: "python.{{ insights_service_name }}" + code: "{{ insights_code_dir | default(None) }}" + env: "{{ insights_home | default(None) }}/insights_env" - role: edx_service edx_service_name: "{{ insights_service_name }}" edx_service_config: "{{ INSIGHTS_CONFIG }}" @@ -22,3 +28,6 @@ dependencies: edx_service_packages: debian: "{{ insights_debian_pkgs + insights_release_specific_debian_pkgs[ansible_distribution_release] }}" redhat: [] + edx_service_decrypt_config_enabled: "{{ INSIGHTS_DECRYPT_CONFIG_ENABLED }}" + edx_service_copy_config_enabled: "{{ INSIGHTS_COPY_CONFIG_ENABLED }}" + diff --git a/playbooks/roles/insights/tasks/main.yml b/playbooks/roles/insights/tasks/main.yml index 8bd8ecda2de..a96486151b2 100644 --- a/playbooks/roles/insights/tasks/main.yml +++ b/playbooks/roles/insights/tasks/main.yml @@ -38,6 +38,7 @@ virtualenv: "{{ insights_venv_dir }}" state: present extra_args: "--exists-action w" + virtualenv_python: python3.5 become_user: "{{ insights_user }}" with_items: "{{ insights_requirements }}" tags: diff --git a/playbooks/roles/insights/templates/edx/app/insights/insights.sh.j2 b/playbooks/roles/insights/templates/edx/app/insights/insights.sh.j2 index 1802bacc8e6..38393cbb1d5 100644 --- a/playbooks/roles/insights/templates/edx/app/insights/insights.sh.j2 +++ b/playbooks/roles/insights/templates/edx/app/insights/insights.sh.j2 @@ -10,10 +10,12 @@ {% endif %} {% if COMMON_ENABLE_NEWRELIC_APP %} +export NEW_RELIC_DISTRIBUTED_TRACING_ENABLED="{{ INSIGHTS_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}" export NEW_RELIC_APP_NAME="{{ INSIGHTS_NEWRELIC_APPNAME }}" export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}" {% endif -%} source {{ insights_app_dir }}/insights_env -{{ executable }} --pythonpath={{ insights_python_path }} -b {{ insights_gunicorn_host }}:{{ 
insights_gunicorn_port }} -w {{ INSIGHTS_GUNICORN_WORKERS }} --timeout={{ insights_gunicorn_timeout }} {{ INSIGHTS_GUNICORN_EXTRA }} {{ insights_wsgi }} +# We exec so that gunicorn is the child of supervisor and can be managed properly +exec {{ executable }} --pythonpath={{ insights_python_path }} -b {{ insights_gunicorn_host }}:{{ insights_gunicorn_port }} -w {{ INSIGHTS_GUNICORN_WORKERS }} --timeout={{ insights_gunicorn_timeout }} {{ INSIGHTS_GUNICORN_EXTRA }} {{ insights_wsgi }} diff --git a/playbooks/roles/jenkins_admin/defaults/main.yml b/playbooks/roles/jenkins_admin/defaults/main.yml index 61b102afa64..c46a108fbdc 100644 --- a/playbooks/roles/jenkins_admin/defaults/main.yml +++ b/playbooks/roles/jenkins_admin/defaults/main.yml @@ -30,7 +30,8 @@ JENKINS_ADMIN_AWS_CREDENTIALS: !!null jenkins_admin_role_name: jenkins_admin -jenkins_admin_version: "1.658" +JENKINS_ADMIN_VERSION: "1.658" +jenkins_admin_version: "{{ JENKINS_ADMIN_VERSION }}" # # OS packages # diff --git a/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/aws_config.j2 b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/aws_config.j2 index 2ebf8796e63..8700534b4dc 100644 --- a/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/aws_config.j2 +++ b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/aws_config.j2 @@ -1,4 +1,4 @@ -{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.iteritems() %} +{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.items() %} [profile {{ deployment }}] aws_access_key_id = {{ creds.access_id }} aws_secret_access_key = {{ creds.secret_key }} diff --git a/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/boto.j2 b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/boto.j2 index 2ebf8796e63..8700534b4dc 100644 --- a/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/boto.j2 +++ b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/boto.j2 @@ -1,4 +1,4 @@ -{% for deployment, creds in 
JENKINS_ADMIN_AWS_CREDENTIALS.iteritems() %} +{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.items() %} [profile {{ deployment }}] aws_access_key_id = {{ creds.access_id }} aws_secret_access_key = {{ creds.secret_key }} diff --git a/playbooks/roles/jenkins_analytics/templates/jenkins.config.main.xml b/playbooks/roles/jenkins_analytics/templates/jenkins.config.main.xml index 528d374c3ad..eddc5d0dacd 100644 --- a/playbooks/roles/jenkins_analytics/templates/jenkins.config.main.xml +++ b/playbooks/roles/jenkins_analytics/templates/jenkins.config.main.xml @@ -10,7 +10,7 @@ {% else %} - {% for permission_group, permissions in jenkins_auth_permissions.iteritems() %} + {% for permission_group, permissions in jenkins_auth_permissions.items() %} {% for permission in permissions %} {% for user in jenkins_auth_users[permission_group] | default([]) %} {{ permission }}:{{ user }} diff --git a/playbooks/roles/jenkins_analytics/templates/seedJob.groovy b/playbooks/roles/jenkins_analytics/templates/seedJob.groovy index 7946d65dfcf..0dd4576684c 100644 --- a/playbooks/roles/jenkins_analytics/templates/seedJob.groovy +++ b/playbooks/roles/jenkins_analytics/templates/seedJob.groovy @@ -13,15 +13,17 @@ job('{{ jenkins_seed_job.name }}') { remote { url('{{ scm.url }}') branch("{{ scm.branch | default('master') }}") - {% if scm.dest %} - relativeTargetDir('{{ scm.dest }}') - {% endif %} {% if scm.credential_id %} credentials('{{ scm.credential_id }}') {% endif %} } - clean(true) - pruneBranches(true) + extensions { + {% if scm.dest %} + relativeTargetDirectory('{{ scm.dest }}') + {% endif %} + cleanAfterCheckout() + pruneBranches() + } } {% endif %} {% endfor %} diff --git a/playbooks/roles/jenkins_build/defaults/main.yml b/playbooks/roles/jenkins_build/defaults/main.yml index dd80609dde9..26e8b9fb847 100644 --- a/playbooks/roles/jenkins_build/defaults/main.yml +++ b/playbooks/roles/jenkins_build/defaults/main.yml @@ -1,31 +1,37 @@ build_jenkins_user_uid: 1002 
build_jenkins_group_gid: 1004 -build_jenkins_version: jenkins_2.89.4 +BUILD_JENKINS_VERSION: jenkins_2.222.3 +build_jenkins_version: "{{ BUILD_JENKINS_VERSION }}" build_jenkins_jvm_args: '-Djava.awt.headless=true -Xmx16384m -DsessionTimeout=60' + +build_jenkins_python_versions: + - python3.5-dev + build_jenkins_configuration_scripts: - 1addJarsToClasspath.groovy - - 2checkInstalledPlugins.groovy - 3importCredentials.groovy + - 3installGroovy.groovy + - 3installPython.groovy - 3mainConfiguration.groovy - 3setGlobalProperties.groovy - - 3shutdownCLI.groovy - 4configureEc2Plugin.groovy - 4configureGHOAuth.groovy - 4configureGHPRB.groovy - 4configureGit.groovy - 4configureGithub.groovy - - 4configureHipChat.groovy - - 4configureJobConfigHistory.groovy - 4configureMailerPlugin.groovy - 4configureMaskPasswords.groovy - 4configureSecurity.groovy + - 4configureSlack.groovy - 4configureSplunk.groovy + - 4configureTimestamper.groovy + - 5configureEmailExtension.groovy - 5createLoggers.groovy # plugins build_jenkins_plugins_list: - name: 'analysis-core' - version: '1.95' + version: '1.96' group: 'org.jvnet.hudson.plugins' - name: 'ansicolor' version: '0.5.2' @@ -36,20 +42,20 @@ build_jenkins_plugins_list: - name: 'antisamy-markup-formatter' version: '1.5' group: 'org.jenkins-ci.plugins' - - name: 'bouncycastle-api' - version: '2.16.1' + - name: 'aws-credentials' + version: '1.24' group: 'org.jenkins-ci.plugins' - - name: 'build-flow-plugin' - version: '0.20' - group: 'com.cloudbees.plugins' - - name: 'build-flow-test-aggregator' - version: '1.0' - group: 'org.zeroturnaround.jenkins' - - name: 'build-flow-toolbox-plugin' - version: '0.1' + - name: 'aws-java-sdk' + version: '1.11.700' + group: 'org.jenkins-ci.plugins' + - name: 'badge' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'bouncycastle-api' + version: '2.18' group: 'org.jenkins-ci.plugins' - name: 'buildgraph-view' - version: '1.1.1' + version: '1.8' group: 'org.jenkins-ci.plugins' - name: 
'build-name-setter' version: '1.3' @@ -61,13 +67,13 @@ build_jenkins_plugins_list: version: '1.5' group: 'org.jenkins-ci.plugins' - name: 'cobertura' - version: '1.11' + version: '1.16' group: 'org.jenkins-ci.plugins' - name: 'copyartifact' version: '1.39' group: 'org.jenkins-ci.plugins' - name: 'credentials' - version: '2.1.16' + version: '2.3.0' group: 'org.jenkins-ci.plugins' - name: 'credentials-binding' version: '1.15' @@ -79,10 +85,13 @@ build_jenkins_plugins_list: version: '1.8' group: 'org.jenkins-ci.plugins' - name: 'ec2' - version: '1.28' + version: '1.49.1' + group: 'org.jenkins-ci.plugins' + - name: 'email-ext' + version: '2.66' group: 'org.jenkins-ci.plugins' - name: 'envinject' - version: '2.1.5' + version: '2.3.0' group: 'org.jenkins-ci.plugins' - name: 'exclusive-execution' version: '0.8' @@ -94,70 +103,79 @@ build_jenkins_plugins_list: version: '0.15.2' group: 'org.jenkins-ci.plugins' - name: 'ghprb' - version: '1.42.0' + version: '1.42.1' group: 'org.jenkins-ci.plugins' - name: 'git' - version: '3.8.0' + version: '4.2.2' + group: 'org.jenkins-ci.plugins' + - name: 'git-client' + version: '3.0.0' group: 'org.jenkins-ci.plugins' - name: 'github' - version: '1.28.1' + version: '1.29.2' group: 'com.coravy.hudson.plugins.github' - name: 'github-api' - version: '1.90' + version: '1.111' group: 'org.jenkins-ci.plugins' - name: 'github-branch-source' - version: '2.3.3' + version: '2.3.6' group: 'org.jenkins-ci.plugins' - name: 'github-oauth' - version: '0.29' + version: '0.33' group: 'org.jenkins-ci.plugins' - name: 'gradle' - version: '1.24' + version: '1.29' group: 'org.jenkins-ci.plugins' - name: 'groovy' - version: '2.0' + version: '2.2' group: 'org.jenkins-ci.plugins' - name: 'groovy-postbuild' version: '2.4' group: 'org.jvnet.hudson.plugins' - - name: 'hipchat' - version: '0.1.9' - group: 'org.jvnet.hudson.plugins' - - name: 'hockeyapp' - version: '1.2.2' - group: 'org.jenkins-ci.plugins' - name: 'htmlpublisher' - version: '1.16' + version: '1.21' + 
group: 'org.jenkins-ci.plugins' + - name: 'jackson2-api' + version: '2.10.2' group: 'org.jenkins-ci.plugins' - name: 'javadoc' version: '1.3' group: 'org.jenkins-ci.plugins' + - name: 'jobConfigHistory' + version: '2.24' + group: 'org.jenkins-ci.plugins' + - name: 'jdk-tool' + version: '1.2' + group: 'org.jenkins-ci.plugins' - name: 'job-dsl' - version: '1.67' + version: '1.77' group: 'org.jenkins-ci.plugins' - name: 'junit' - version: '1.24' + version: '1.28' group: 'org.jenkins-ci.plugins' - name: 'ldap' version: '1.20' group: 'org.jenkins-ci.plugins' + - name: 'lockable-resources' + version: '2.5' + group: 'org.6wind.jenkins' - name: 'mailer' - version: '1.21' + version: '1.23' group: 'org.jenkins-ci.plugins' - name: 'mask-passwords' - version: '2.10.1' + version: '2.13' group: 'org.jenkins-ci.plugins' - name: 'matrix-auth' - version: '1.5' + version: '2.3' group: 'org.jenkins-ci.plugins' - name: 'matrix-project' - version: '1.12' + version: '1.14' group: 'org.jenkins-ci.plugins' - name: 'maven-plugin' - version: '3.1' + version: '3.4' group: 'org.jenkins-ci.main' - name: 'monitoring' - version: '1.71.0' + version: '1.76.0' group: 'org.jvnet.hudson.plugins' - name: 'multiple-scms' version: '0.6' @@ -166,67 +184,73 @@ build_jenkins_plugins_list: version: '1.7.2' group: 'org.jenkins-ci.plugins' - name: 'pam-auth' - version: '1.2' + version: '1.5.1' group: 'org.jenkins-ci.plugins' - name: 'parameterized-trigger' version: '2.35.2' group: 'org.jenkins-ci.plugins' - name: 'pipeline-model-definition' - version: '1.2.9' + version: '1.3.4.1' group: 'org.jenkinsci.plugins' - - name: 'pipeline-build-step' - version: '2.5.1' - group: 'org.jenkins-ci.plugins' - name: 'pipeline-utility-steps' version: '2.0.2' group: 'org.jenkins-ci.plugins' - name: 'PrioritySorter' version: '2.9' group: 'org.jenkins-ci.plugins' + - name: 'promoted-builds' + version: '3.5' + group: 'org.jenkins-ci.plugins' + - name: 'pyenv-pipeline' + version: '2.0.1' + group: 'org.jenkins-ci.plugins' - name: 
'rebuild' - version: '1.25' + version: '1.31' group: 'com.sonyericsson.hudson.plugins.rebuild' + - name: 'resource-disposer' + version: '0.12' + group: 'org.jenkins-ci.plugins' - name: 'run-condition' version: '1.0' group: 'org.jenkins-ci.plugins' - name: 'script-security' - version: '1.42' + version: '1.71' group: 'org.jenkins-ci.plugins' - name: 'shiningpanda' version: '0.23' group: 'org.jenkins-ci.plugins' + - name: 'slack' + version: '2.21' + group: 'org.jenkins-ci.plugins' - name: 'splunk-devops' - version: '1.6.4' + version: '1.8.1' + group: 'com.splunk.splunkins' + - name: 'splunk-devops-extend' + version: '1.8.1' group: 'com.splunk.splunkins' - name: 'ssh-agent' - version: '1.14' + version: '1.17' group: 'org.jenkins-ci.plugins' - name: 'ssh-credentials' - version: '1.13' + version: '1.17.3' group: 'org.jenkins-ci.plugins' - name: 'ssh-slaves' - version: '1.26' - group: 'org.jenkins-ci.plugins' - - name: 'structs' - version: '1.14' + version: '1.28.1' group: 'org.jenkins-ci.plugins' - name: 'subversion' - version: '2.10.3' + version: '2.13.1' group: 'org.jenkins-ci.plugins' - name: 'text-finder' version: '1.10' group: 'org.jenkins-ci.plugins' - - name: 'thinBackup' - version: '1.7.4' - group: 'org.jvnet.hudson.plugins' - name: 'throttle-concurrents' version: '2.0.1' group: 'org.jenkins-ci.plugins' - name: 'timestamper' - version: '1.8.9' + version: '1.11.2' group: 'org.jenkins-ci.plugins' - name: 'token-macro' - version: '2.3' + version: '2.10' group: 'org.jenkins-ci.plugins' - name: 'translation' version: '1.16' @@ -235,25 +259,31 @@ build_jenkins_plugins_list: version: '0.7.11' group: 'org.jenkins-ci.plugins' - name: 'warnings' - version: '4.68' + version: '5.0.1' group: 'org.jvnet.hudson.plugins' + - name: 'warnings-ng' + version: '6.0.3' + group: 'io.jenkins.plugins' - name: 'workflow-aggregator' - version: '2.5' + version: '2.6' group: 'org.jenkins-ci.plugins.workflow' - name: 'windows-slaves' version: '1.3.1' group: 'org.jenkins-ci.plugins' - name: 
'workflow-cps' - version: '2.46' + version: '2.80' group: 'org.jenkins-ci.plugins.workflow' - - name: 'workflow-durable-task-step' - version: '2.18' + - name: 'workflow-cps-global-lib' + version: '2.15' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-job' + version: '2.35' group: 'org.jenkins-ci.plugins.workflow' - name: 'workflow-support' - version: '2.18' + version: '3.3' group: 'org.jenkins-ci.plugins.workflow' - name: 'ws-cleanup' - version: '0.34' + version: '0.37' group: 'org.jenkins-ci.plugins' - name: 'xunit' version: '1.93' @@ -269,9 +299,6 @@ build_jenkins_ghprb_cron_schedule: 'H/5 * * * *' # github JENKINS_GITHUB_CONFIG: '' -# hipchat -build_jenkins_hipchat_room: 'testeng' - # ec2 build_jenkins_instance_cap: '500' @@ -307,31 +334,18 @@ build_jenkins_log_list: - name: 'hudson.plugins.git.GitSCM' log_level: 'ALL' -# job config history -build_jenkins_history_max_days: '15' -build_jenkins_history_exclude_pattern: 'queue|nodeMonitors|UpdateCenter|global-build-stats|GhprbTrigger' - # splunk -build_jenkins_splunk_ignored_jobs: '(^((?!edx-(platform|e2e)).)*$)|.*private.*' +build_jenkins_splunk_ignored_jobs: '(^((?!edx-(platform|e2e)|gather|ironwood).)*$)|.*private.*' build_jenkins_splunk_script_type: 'inline' build_jenkins_splunk_script_path: 'splunk/splunk.groovy' build_jenkins_splunk_file_path: '{{ role_path }}/../jenkins_build/files/splunk/splunk.groovy' -build_jenkins_splunk_metadata: - - data_source: 'default' - config_item: 'sourcetype' - value: 'json:jenkins:old' - - data_source: 'BUILD_REPORT' - config_item: 'sourcetype' - value: 'build_report' - - data_source: 'BUILD_EVENT' - config_item: 'sourcetype' - value: 'build_event' - - data_source: 'CONSOLE_LOG' - config_item: 'sourcetype' - value: 'console_log' - - data_source: 'QUEUE_INFO' - config_item: 'sourcetype' - value: 'queue_information' - - data_source: 'SLAVE_INFO' - config_item: 'sourcetype' - value: 'slave_information' +build_jenkins_splunk_metadata: [] + +# timestamper 
+build_jenkins_timestamper_system_time: "''HH:mm:ss' '" +build_jenkins_timestamper_elapsed: "''HH:mm:ss.S' '" +build_jenkins_timestamper_pipelines_enabled: true + +BUILD_JENKINS_SPLUNK_ENABLED: false +build_jenkins_splunk_master_hostname: "" +build_jenkins_splunk_event_source: "" diff --git a/playbooks/roles/jenkins_build/files/splunk/splunk.groovy b/playbooks/roles/jenkins_build/files/splunk/splunk.groovy index 968ec345afc..a0e48ce2b83 100644 --- a/playbooks/roles/jenkins_build/files/splunk/splunk.groovy +++ b/playbooks/roles/jenkins_build/files/splunk/splunk.groovy @@ -4,3 +4,6 @@ splunkins.sendTestReport(50) // Send paver timing logs to Splunk splunkins.archive("**/timing*.log", null, false, "10MB") + +// Index the codecov metrics generated by gather-codecov-metrics +splunkins.archive("**/codecov_metrics.json", null, false, "1MB") diff --git a/playbooks/roles/jenkins_build/meta/main.yml b/playbooks/roles/jenkins_build/meta/main.yml index 99835f096b2..4df5669ec42 100644 --- a/playbooks/roles/jenkins_build/meta/main.yml +++ b/playbooks/roles/jenkins_build/meta/main.yml @@ -15,12 +15,9 @@ dependencies: jenkins_common_ghprb_skip_phrase: '{{ build_jenkins_ghprb_skip_phrase }}' jenkins_common_ghprb_cron_schedule: '{{ build_jenkins_ghprb_cron_schedule }}' jenkins_common_github_configs: '{{ JENKINS_GITHUB_CONFIG }}' - jenkins_common_hipchat_room: '{{ build_jenkins_hipchat_room }}' jenkins_common_instance_cap: '{{ build_jenkins_instance_cap }}' jenkins_common_seed_name: '{{ build_jenkins_seed_name }}' jenkins_common_log_list: '{{ build_jenkins_log_list }}' - jenkins_common_history_max_days: '{{ build_jenkins_history_max_days }}' - jenkins_common_history_exclude_pattern: '{{ build_jenkins_history_exclude_pattern }}' jenkins_common_server_name: '{{ JENKINS_SERVER_NAME }}' jenkins_common_splunk_ignored_jobs: '{{ build_jenkins_splunk_ignored_jobs }}' jenkins_common_splunk_script_type: '{{ build_jenkins_splunk_script_type }}' @@ -30,3 +27,8 @@ dependencies: 
jenkins_common_splunk_event_source: '{{ build_jenkins_splunk_event_source }}' jenkins_common_splunk_enabled: '{{ BUILD_JENKINS_SPLUNK_ENABLED }}' jenkins_common_splunk_file_path: '{{ build_jenkins_splunk_file_path }}' + jenkins_common_email_replyto: '{{ JENKINS_MAILER_REPLY_TO_ADDRESS }}' + jenkins_common_python_versions: '{{ build_jenkins_python_versions }}' + jenkins_common_timestamper_system_clock_format: '{{ build_jenkins_timestamper_system_time }}' + jenkins_common_timestamper_elapsed_time_format: '{{ build_jenkins_timestamper_elapsed }}' + jenkins_common_timestamper_enabled_on_pipelines: '{{ build_jenkins_timestamper_pipelines_enabled }}' diff --git a/playbooks/roles/jenkins_common/defaults/main.yml b/playbooks/roles/jenkins_common/defaults/main.yml index e604be029c2..1850ab134b5 100644 --- a/playbooks/roles/jenkins_common/defaults/main.yml +++ b/playbooks/roles/jenkins_common/defaults/main.yml @@ -1,14 +1,20 @@ jenkins_common_user: jenkins jenkins_common_group: jenkins +jenkins_common_groups: '{{ jenkins_common_group }}' jenkins_common_home: /var/lib/jenkins jenkins_common_config_path: '{{ jenkins_common_home }}/init-configs' jenkins_common_port: 8080 -jenkins_common_version: jenkins_1.651.3 +JENKINS_COMMON_VERSION: jenkins_1.651.3 +jenkins_common_version: "{{ JENKINS_COMMON_VERSION }}" jenkins_common_war_source: https://s3.amazonaws.com/edx-testeng-tools/jenkins jenkins_common_nginx_port: 80 jenkins_common_protocol_https: true +# When checking if Jenkins is finished initializing, expect a 200 as it should +# be publicly available +jenkins_common_ready_status_code: 200 JENKINS_SERVER_NAME: jenkins.example.org +jenkins_node_usage: EXCLUSIVE jenkins_common_debian_pkgs: - nginx @@ -18,6 +24,8 @@ jenkins_common_debian_pkgs: - daemon - psmisc +jenkins_common_python_versions: [] + jenkins_common_configuration_git_url: https://github.com/edx/jenkins-configuration.git jenkins_common_jenkins_configuration_branch: master jenkins_common_configuration_src_path: 
src/main/groovy @@ -27,24 +35,31 @@ jenkins_common_configuration_scripts: [] jenkins_common_non_plugin_template_files: - credentials - ec2_config + - email_ext_config - ghprb_config - git_config - github_config - github_oauth - - hipchat_config + - groovy_config - job_config_history - log_config - mailer_config - main_config - mask_passwords_config - properties_config + - python_config - security - seed_config + - slack_config - splunk_config + - timestamper_config # Jenkins default config values jenkins_common_jvm_args: '' +# Users +JENKINS_USER_LIST: [] + # main jenkins_common_main_system_message: '' jenkins_common_main_num_executors: 1 @@ -69,6 +84,19 @@ jenkins_common_system_properties: JENKINS_MAIN_URL: 'https://jenkins.example.org/' JENKINS_MAIN_ADMIN_EMAIL: 'jenkins ' +# global tool configurations +jenkins_common_groovy_installations: + - NAME: 'DEFAULT_GROOVY' + HOME: '' + VERSION: '2.5.1' +jenkins_common_python_installations: + - PYTHON_ALIAS: 'System-CPython-2.7' + PYTHON_PATH: '/usr/bin/python2.7' + PYTHON_PROPERTIES: [] + - PYTHON_ALIAS: 'PYTHON_3.5' + PYTHON_PATH: '/usr/bin/python3.5' + PYTHON_PROPERTIES: [] + # plugins jenkins_common_plugins_list: [] @@ -78,8 +106,9 @@ jenkins_common_instance_cap: '' JENKINS_EC2_PRIVATE_KEY: '' JENKINS_EC2_REGION: '' -JENKINS_EC2_ACCESS_KEY_ID: '' -JENKINS_EC2_SECRET_ACCESS_KEY: '' +JENKINS_EC2_CREDENTIAL_ID: '' +JENKINS_EC2_ROLE_ARN: '' +JENKINS_EC2_ROLE_SESSION_NAME: '' JENKINS_EC2_AMIS: [] # ghprb @@ -122,14 +151,19 @@ JENKINS_SECRET_FILES_LIST: [] JENKINS_USERNAME_PASSWORD_LIST: [] JENKINS_SECRET_TEXT_LIST: [] JENKINS_CERTIFICATES_LIST: [] -JENKINS_MASTER_SSH_LIST: [] -JENKINS_CUSTOM_SSH_LIST: [] +JENKINS_SSH_LIST: [] +JENKINS_AWS_LIST: [] # security jenkins_common_dsl_script_security_enabled: true jenkins_common_security_agent_protocols: - 'JNLP4-connect' jenkins_common_security_agent_jnlp_tcp_port: 0 +JENKINS_CSRF_PROTECTION_ENABLED: false +# proxy compatibility will exclude the client ip from crumbs. 
+# this is useful is dealing with a proxy that filters them +# in requests +JENKINS_CSRF_PROXY_COMPATIBILITY: false JENKINS_SECURITY_GROUPS: [] @@ -146,20 +180,15 @@ jenkins_common_github_configs: CACHE_SIZE: 20 # github oauth settings - jenkins_common_security_scopes: 'read:org,user:email' JENKINS_SECURITY_CLIENT_ID: '' JENKINS_SECURITY_CLIENT_SECRET: '' -# hipchat -jenkins_common_hipchat_room: '' -jenkins_common_hipchat_v2_enabled: true -JENKINS_HIPCHAT_API_TOKEN: '' - # seed jenkins_common_seed_name: 'seed_job' jenkins_common_seed_path: '{{ jenkins_common_config_path }}/xml/seed_job.xml' +jenkins_common_seed_job_source: '{{ role_path }}/files/xml/*' # logs jenkins_common_log_list: @@ -189,10 +218,59 @@ JENKINS_MAILER_DEFAULT_SUFFIX: '@example.com' JENKINS_MAILER_SMTP_AUTH_USERNAME: '' JENKINS_MAILER_SMTP_AUTH_PASSWORD: '' +# email ext plugin +jenkins_common_email_advanced_properties: '' +jenkins_common_email_content_type: 'text/plain' +jenkins_common_default_email_subject: '${PROJECT_NAME} #${BUILD_NUMBER} is ${BUILD_STATUS}' +jenkins_common_email_emergency_reroute: '' +jenkins_common_email_replyto: '' +jenkins_common_email_debug_mode: 'false' +jenkins_common_email_max_attachment_size: 10 +jenkins_common_email_default_recipients: '' +jenkins_common_email_add_precedence_bulk: 'true' +jenkins_common_email_allowed_domains: '' +jenkins_common_email_excluded_committers: '' +jenkins_common_email_require_admin_for_template_testing: 'true' +jenkins_common_email_watching_enabled: '' +jenkins_common_email_allow_unregistered_enabled: '' +jenkins_common_email_use_list_id: '' +jenkins_common_email_list_id: '' +jenkins_common_email_triggers: + - 'AbortedTrigger' + - 'FailureTrigger' + - 'FixedTrigger' +# if you wish to set the following 3 values, supply paths to +# individual files with the content you want to specify +jenkins_common_email_default_body_path: '' +jenkins_common_email_default_presend_script_path: '' +jenkins_common_email_default_postsend_script_path: '' + # 
mask passwords JENKINS_MASK_PASSWORDS_CLASSES: [] JENKINS_MASK_PASSWORDS_PAIRS: [] +# SAML +JENKINS_SAML_IDP_METADATA: | + dummy data +JENKINS_SAML_DISPLAY_NAME_ATTRIBUTE: '' +JENKINS_SAML_GROUP_ATTRIBUTE: '' +JENKINS_SAML_MAX_AUTH_LIFETIME_SECONDS: 60 +JENKINS_SAML_USERNAME_ATTRIBUTE: '' +JENKINS_SAML_EMAIL_ATTRIBUTE: '' +JENKINS_SAML_LOGOUT_URL: '' +JENKINS_SAML_ADVANCED_CONFIGURATION: {} +JENKINS_SAML_ENCRYPTION_DATA: {} +JENKINS_SAML_USERNAME_CASE_CONVENTION: 'None' +JENKINS_SAML_BINDING: 'POST' +JENKINS_SAML_CUSTOM_ATTRIBUTES: [] + +# SLACK +JENKINS_SLACK_BASE_URL: '' +JENKINS_IS_SLACK_BOT: 'true' +JENKINS_SLACK_ROOM: '' +JENKINS_SLACK_TEAM_DOMAIN: '' +JENKINS_SLACK_CREDENTIAL_ID: '' + # splunk jenkins_common_splunk_enabled: true jenkins_common_splunk_use_ssl: true @@ -205,11 +283,13 @@ jenkins_common_splunk_event_source: '' jenkins_common_splunk_script_type: 'path' jenkins_common_splunk_script_path: '' jenkins_common_splunk_file_path: '{{ role_path }}/files/splunk/*' -jenkins_common_splunk_metadata: - - data_source: 'Default' - config_item: 'Source Type' - value: 'json:jenkins:old' +jenkins_common_splunk_metadata: [] JENKINS_SPLUNK_HOSTNAME: '' JENKINS_SPLUNK_PORT: 8088 JENKINS_SPLUNK_APP_URL: '' JENKINS_SPLUNK_TOKEN: '' + +# timestamper +jenkins_common_timestamper_system_clock_format: "''HH:mm:ss' '" +jenkins_common_timestamper_elapsed_time_format: "''HH:mm:ss.S' '" +jenkins_common_timestamper_enabled_on_pipelines: true diff --git a/playbooks/roles/jenkins_common/files/xml/seed_job.xml b/playbooks/roles/jenkins_common/files/xml/seed_job.xml index ec7d2e9321f..018e113ee14 100644 --- a/playbooks/roles/jenkins_common/files/xml/seed_job.xml +++ b/playbooks/roles/jenkins_common/files/xml/seed_job.xml @@ -16,7 +16,7 @@ false -1 - + false false @@ -35,7 +35,7 @@ - + 2 @@ -69,7 +69,7 @@ fi - + tert libs @@ -82,7 +82,7 @@ assemble true true - + ${DSL_SCRIPT} false false diff --git a/playbooks/roles/jenkins_common/tasks/main.yml 
b/playbooks/roles/jenkins_common/tasks/main.yml index e239d8d71ea..501c01924cd 100644 --- a/playbooks/roles/jenkins_common/tasks/main.yml +++ b/playbooks/roles/jenkins_common/tasks/main.yml @@ -10,6 +10,17 @@ - install - install:system-requirements +- name: Install specific versions of python + apt: + name: '{{ item }}' + state: present + update_cache: yes + with_items: '{{ jenkins_common_python_versions }}' + tags: + - jenkins + - install + - install:system-requirements + - name: Create jenkins group with specified gid group: name: '{{ jenkins_common_group }}' @@ -34,7 +45,7 @@ name: '{{ jenkins_common_user }}' append: yes uid: '{{ jenkins_common_user_uid }}' - groups: '{{ jenkins_common_group }}' + groups: '{{ jenkins_common_groups }}' when: jenkins_common_user_uid is defined tags: - install @@ -44,12 +55,43 @@ user: name: '{{ jenkins_common_user }}' append: yes - groups: '{{ jenkins_common_group }}' + groups: '{{ jenkins_common_groups }}' when: jenkins_common_user_uid is not defined or not jenkins_common_user_uid tags: - install - install:system-requirements +- name: Create jenkins home and set ownership + file: + path: "{{ jenkins_common_home }}" + state: directory + owner: "{{ jenkins_common_user }}" + group: "{{ jenkins_common_group }}" + mode: 0700 + tags: + - install + - install:system-requirements + +- name: set nofile soft limit for the user jenkins + pam_limits: + domain: "{{ jenkins_common_user }}" + limit_type: soft + limit_item: nofile + value: 4096 + tags: + - install + - install:system-requirements + +- name: set nofile hard limit for the user jenkins + pam_limits: + domain: "{{ jenkins_common_user }}" + limit_type: hard + limit_item: nofile + value: 8096 + tags: + - install + - install:system-requirements + - name: Create /var/run/jenkins file: path: "/var/run/jenkins" @@ -68,11 +110,13 @@ state: absent with_items: - '{{ jenkins_common_home }}/init.groovy.d' + - '{{ jenkins_common_home }}/plugins' + - '{{ jenkins_common_home }}/utils' - '{{ 
jenkins_common_config_path }}' tags: - install - install:base - - install:jenkins-configuration + - install:plugins - jenkins:local-dev - name: Create necessary folders @@ -93,7 +137,7 @@ tags: - install - install:base - - install:jenkins-configuration + - install:plugins - jenkins:local-dev - name: Download Jenkins war file @@ -144,7 +188,7 @@ tags: - install - install:base - - install:jenkins-configuration + - install:plugins - jenkins:local-dev - name: Run gradle libs @@ -159,7 +203,7 @@ tags: - install - install:base - - install:jenkins-configuration + - install:plugins - jenkins:local-dev - name: Copy init scripts into init.groovy.d @@ -171,14 +215,14 @@ tags: - install - install:base - - install:jenkins-configuration + - install:plugins -- name: Copy all init scripts other than oauth for local dev +- name: Copy all init scripts other than oauth and security for local dev command: 'cp {{ jenkins_common_git_home }}/jenkins-configuration/{{ jenkins_common_configuration_src_path }}/{{ item }} {{ jenkins_common_home }}/init.groovy.d/' with_items: '{{ jenkins_common_configuration_scripts }}' become: true become_user: '{{ jenkins_common_user }}' - when: 'item != "4configureGHOAuth.groovy" and init_scripts_copied is not defined' + when: 'item != "4configureGHOAuth.groovy" and item != "4configureSecurity.groovy" and init_scripts_copied is not defined' tags: - jenkins:local-dev @@ -196,7 +240,7 @@ tags: - install - install:base - - install:jenkins-configuration + - install:plugins - jenkins:local-dev - name: Copy non plugins template files @@ -210,16 +254,16 @@ tags: - install - install:base - - install:jenkins-configuration + - install:plugins -- name: For local dev, copy any config files other than oauth +- name: For local dev, copy any config files other than oauth and security template: src: '{{ role_path }}/templates/config/{{ item }}.yml.j2' dest: '{{ jenkins_common_config_path }}/{{ item }}.yml' owner: '{{ jenkins_common_user }}' group: '{{ jenkins_common_group 
}}' with_items: '{{ jenkins_common_non_plugin_template_files }}' - when: 'item != "security" and templates_copied is not defined' + when: 'item != "github_oauth" and item != "security" and templates_copied is not defined' tags: - jenkins:local-dev @@ -243,7 +287,6 @@ - install - install:base - install:plugins - - install:jenkins-configuration - jenkins:local-dev - name: Copy ec2 config files @@ -257,7 +300,7 @@ tags: - install - install:base - - install:jenkins-configuration + - install:plugins - jenkins:local-dev - name: Copy xml config files @@ -267,11 +310,11 @@ owner: '{{ jenkins_common_user }}' group: '{{ jenkins_common_group }}' with_fileglob: - - '{{ role_path }}/files/xml/*' + - '{{ jenkins_common_seed_job_source }}' tags: - install - install:base - - install:jenkins-configuration + - install:plugins - jenkins:local-dev - name: Copy splunk config script @@ -285,10 +328,10 @@ tags: - install - install:base - - install:jenkins-configuration + - install:plugins - jenkins:local-dev -- name: Run plugins.gradle +- name: Run plugins.gradle to download plugins shell: './gradlew -b plugins.gradle plugins' args: chdir: '{{ jenkins_common_git_home }}/jenkins-configuration' @@ -301,7 +344,6 @@ - install - install:base - install:plugins - - install:jenkins-configuration - jenkins:local-dev - name: Copy secret file credentials @@ -313,21 +355,7 @@ tags: - install - install:base - - install:jenkins-configuration - - jenkins:local-dev - -- name: Copy ssh key credentials - copy: - content: "{{ item.content }}" - dest: '{{ jenkins_common_config_path }}/credentials/{{ item.name }}' - owner: '{{ jenkins_common_user }}' - group: '{{ jenkins_common_group }}' - with_items: '{{ JENKINS_CUSTOM_SSH_LIST }}' - no_log: yes - tags: - - install - - install:base - - install:jenkins-configuration + - install:plugins - jenkins:local-dev - name: Copy ec2 key @@ -340,7 +368,7 @@ tags: - install - install:base - - install:jenkins-configuration + - install:plugins - jenkins:local-dev - name: 
Start Jenkins Service @@ -348,9 +376,34 @@ name: jenkins daemon_reload: yes state: restarted + enabled: yes tags: - manage - manage:start - install:plugins - - install:jenkins-configuration - jenkins:promote-to-production + +- name: Wait until the Jenkins service has fully initialized + uri: + url: "http://127.0.0.1:{{ jenkins_common_port }}" + status_code: '{{ jenkins_common_ready_status_code }}' + register: result + until: result.status == jenkins_common_ready_status_code + retries: 600 + delay: 1 + tags: + - install:base + - install:plugins + +- name: Delete any existing jenkins-configuration folders to avoid unwanted configuration + file: + path: '{{ item }}' + owner: '{{ jenkins_common_user }}' + group: '{{ jenkins_common_group }}' + state: absent + with_items: + - '{{ jenkins_common_home }}/init.groovy.d' + - '{{ jenkins_common_config_path }}' + tags: + - install:base + - install:plugins diff --git a/playbooks/roles/jenkins_common/templates/config/credentials.yml.j2 b/playbooks/roles/jenkins_common/templates/config/credentials.yml.j2 index 2c1d7d7fa2a..729b14433c1 100644 --- a/playbooks/roles/jenkins_common/templates/config/credentials.yml.j2 +++ b/playbooks/roles/jenkins_common/templates/config/credentials.yml.j2 @@ -30,22 +30,23 @@ description: '{{ cert.description }}' id: '{{ cert.id }}' {% endfor %} -{% for master_ssh in JENKINS_MASTER_SSH_LIST %} +{% for ssh in JENKINS_SSH_LIST %} - credentialType: 'ssh' - scope: '{{ master_ssh.scope }}' - username: '{{ master_ssh.username }}' - isJenkinsMasterSsh: true - passphrase: '{{ master_ssh.passphrase }}' - description: '{{ master_ssh.description }}' - id: '{{ master_ssh.id }}' + scope: '{{ ssh.scope }}' + username: '{{ ssh.username }}' + sshKey: | + {{ ssh.sshKey | indent(4) }} + passphrase: '{{ ssh.passphrase }}' + description: '{{ ssh.description }}' + id: '{{ ssh.id }}' {% endfor %} -{% for custom_ssh in JENKINS_CUSTOM_SSH_LIST %} -- credentialType: 'ssh' - scope: '{{ custom_ssh.scope }}' - username: '{{ 
custom_ssh.username }}' - isJenkinsMasterSsh: false - path: 'credentials/{{ custom_ssh.name }}' - passphrase: '{{ custom_ssh.passphrase }}' - description: '{{ custom_ssh.description }}' - id: '{{ custom_ssh.id }}' +{% for aws in JENKINS_AWS_LIST %} +- credentialType: 'aws' + scope: '{{ aws.scope }}' + id: '{{ aws.id }}' + accessKeyId: '{{ aws.accessKeyId }}' + secretAccessKey: '{{ aws.secretAccessKey }}' + description: '{{ aws.description }}' + iamRole: '{{ aws.iamRole }}' + mfaSerialNumber: '{{ aws.mfaSerialNumber }}' {% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/ec2_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/ec2_config.yml.j2 index ebc63bcf6ed..efbe86cdd1c 100644 --- a/playbooks/roles/jenkins_common/templates/config/ec2_config.yml.j2 +++ b/playbooks/roles/jenkins_common/templates/config/ec2_config.yml.j2 @@ -1,12 +1,13 @@ --- CLOUDS: - NAME: '{{ JENKINS_EC2_REGION }}' - ACCESS_KEY_ID: '{{ JENKINS_EC2_ACCESS_KEY_ID }}' - SECRET_ACCESS_KEY: '{{ JENKINS_EC2_SECRET_ACCESS_KEY }}' + CREDENTIAL_ID: '{{ JENKINS_EC2_CREDENTIAL_ID }}' USE_INSTANCE_PROFILE_FOR_CREDS: {{ jenkins_common_use_instance_profile_for_creds }} REGION: '{{ JENKINS_EC2_REGION }}' EC2_PRIVATE_KEY_PATH: '{{ jenkins_common_config_path }}/ec2/id_rsa' INSTANCE_CAP: '{{ jenkins_common_instance_cap }}' + ROLE_ARN: '{{ JENKINS_EC2_ROLE_ARN }}' + ROLE_SESSION_NAME: '{{ JENKINS_EC2_ROLE_SESSION_NAME }}' AMIS: {% for ami in JENKINS_EC2_AMIS %} - AMI_ID: '{{ ami.AMI_ID }}' @@ -16,7 +17,6 @@ CLOUDS: SPOT_INSTANCE_BID_TYPE: '{{ ami.SPOT_CONFIG.SPOT_INSTANCE_BID_TYPE }}' SECURITY_GROUPS: '{{ ami.SECURITY_GROUPS }}' REMOTE_FS_ROOT: '{{ ami.REMOTE_FS_ROOT }}' - SSH_PORT: '{{ ami.SSH_PORT }}' INSTANCE_TYPE: '{{ ami.INSTANCE_TYPE }}' LABEL_STRING: '{{ ami.LABEL_STRING }}' MODE: '{{ ami.MODE }}' @@ -26,7 +26,6 @@ CLOUDS: USER_DATA: '{{ ami.USER_DATA }}' NUM_EXECUTORS: '{{ ami.NUM_EXECUTORS }}' REMOTE_ADMIN: '{{ ami.REMOTE_ADMIN }}' - ROOT_COMMAND_PREFIX: '{{
ami.ROOT_COMMAND_PREFIX }}' JVM_OPTIONS: '{{ ami.JVM_OPTIONS }}' STOP_ON_TERMINATE: {{ ami.STOP_ON_TERMINATE }} SUBNET_ID: '{{ ami.SUBNET_ID }}' @@ -41,4 +40,18 @@ CLOUDS: IAM_INSTANCE_PROFILE: '{{ ami.IAM_INSTANCE_PROFILE }}' USE_EPHEMERAL_DEVICES: {{ ami.USE_EPHEMERAL_DEVICES }} LAUNCH_TIMEOUT: '{{ ami.LAUNCH_TIMEOUT }}' + EBS_OPTIMIZED: {{ ami.EBS_OPTIMIZED }} + DELETE_ROOT_ON_TERMINATION: {{ ami.DELETE_ROOT_ON_TERMINATION }} + AMI_TYPE: + ROOT_COMMAND_PREFIX: '{{ ami.AMI_TYPE.ROOT_COMMAND_PREFIX }}' + SLAVE_COMMAND_PREFIX: '{{ ami.AMI_TYPE.SLAVE_COMMAND_PREFIX }}' + SLAVE_COMMAND_SUFFIX: '{{ ami.AMI_TYPE.SLAVE_COMMAND_SUFFIX }}' + REMOTE_SSH_PORT: '{{ ami.AMI_TYPE.REMOTE_SSH_PORT }}' + USE_DEDICATED_TENANCY: {{ ami.USE_DEDICATED_TENANCY }} + ASSOCIATE_PUBLIC_IP: {{ ami.ASSOCIATE_PUBLIC_IP }} + CUSTOM_DEVICE_MAPPING: '{{ ami.CUSTOM_DEVICE_MAPPING }}' + USE_EXTERNAL_SSH_PROCESS: {{ ami.USE_EXTERNAL_SSH_PROCESS }} + CONNECT_WITH_PUBLIC_IP: {{ ami.CONNECT_WITH_PUBLIC_IP }} + MONITORING: {{ ami.MONITORING }} + T2_UNLIMITED: {{ ami.T2_UNLIMITED }} {% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/email_ext_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/email_ext_config.yml.j2 new file mode 100644 index 00000000000..aaad6cf1743 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/email_ext_config.yml.j2 @@ -0,0 +1,24 @@ +--- +ADV_PROPERTIES: '{{ jenkins_common_email_advanced_properties }}' +DEFAULT_CONTENT_TYPE: '{{ jenkins_common_email_content_type }}' +DEFAULT_SUBJECT: '{{ jenkins_common_default_email_subject }}' +DEFAULT_BODY_PATH: '{{ jenkins_common_email_default_body_path }}' +EMERGENCY_REROUTE: '{{ jenkins_common_email_emergency_reroute }}' +DEFAULT_REPLYTO: '{{ jenkins_common_email_replyto }}' +DEFAULT_PRESEND_SCRIPT_PATH: '{{ jenkins_common_email_default_presend_script_path }}' +DEFAULT_POSTSEND_SCRIPT_PATH: '{{ jenkins_common_email_default_postsend_script_path }}' +DEBUG_MODE: '{{ 
jenkins_common_email_debug_mode }}' +MAX_ATTACHMENT_SIZE: '{{ jenkins_common_email_max_attachment_size }}' +DEFAULT_RECIPIENTS: '{{ jenkins_common_email_default_recipients }}' +ADD_PRECEDENCE_BULK: '{{ jenkins_common_email_add_precedence_bulk }}' +ALLOWED_DOMAINS: '{{ jenkins_common_email_allowed_domains }}' +EXCLUDED_COMMITTERS: '{{ jenkins_common_email_excluded_committers }}' +REQUIRE_ADMIN_FOR_TEMPLATE_TESTING: '{{ jenkins_common_email_require_admin_for_template_testing }}' +WATCHING_ENABLED: '{{ jenkins_common_email_watching_enabled }}' +ALLOW_UNREGISTERED_ENABLED: '{{ jenkins_common_email_allow_unregistered_enabled }}' +USE_LIST_ID: '{{ jenkins_common_email_use_list_id }}' +LIST_ID: '{{ jenkins_common_email_list_id }}' +TRIGGERS: +{% for trigger in jenkins_common_email_triggers %} + - '{{ trigger }}' +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/groovy_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/groovy_config.yml.j2 new file mode 100644 index 00000000000..c82d3385e4d --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/groovy_config.yml.j2 @@ -0,0 +1,7 @@ +--- +GROOVY_INSTALLATIONS: +{% for installation in jenkins_common_groovy_installations %} + - NAME: '{{ installation.NAME }}' + HOME: '{{ installation.HOME }}' + VERSION: '{{ installation.VERSION }}' +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/hipchat_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/hipchat_config.yml.j2 deleted file mode 100644 index 7246650f5d4..00000000000 --- a/playbooks/roles/jenkins_common/templates/config/hipchat_config.yml.j2 +++ /dev/null @@ -1,4 +0,0 @@ ---- -API_TOKEN: '{{ JENKINS_HIPCHAT_API_TOKEN }}' -ROOM: '{{ jenkins_common_hipchat_room }}' -V2_ENABLED: {{ jenkins_common_hipchat_v2_enabled }} diff --git a/playbooks/roles/jenkins_common/templates/config/main_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/main_config.yml.j2 index
ac36715bea4..c1cef5de8ee 100644 --- a/playbooks/roles/jenkins_common/templates/config/main_config.yml.j2 +++ b/playbooks/roles/jenkins_common/templates/config/main_config.yml.j2 @@ -8,7 +8,7 @@ MAIN: {% for label in jenkins_common_main_labels %} - '{{ label }}' {% endfor %} - USAGE: 'EXCLUSIVE' + USAGE: '{{ jenkins_node_usage }}' QUIET_PERIOD: {{ jenkins_common_main_quiet_period }} SCM_RETRY_COUNT: {{ jenkins_common_main_scm_retry }} DISABLE_REMEMBER_ME: {{ jenkins_common_main_disable_remember }} @@ -29,4 +29,5 @@ FORMATTER: DISABLE_SYNTAX_HIGHLIGHTING: {{ jenkins_common_disable_syntax_highlighting }} CLI: CLI_ENABLED: false - +SETUP_WIZARD: + SETUP_WIZARD_ENABLED: false diff --git a/playbooks/roles/jenkins_common/templates/config/python_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/python_config.yml.j2 new file mode 100644 index 00000000000..1d3ff71694d --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/python_config.yml.j2 @@ -0,0 +1,10 @@ +--- +PYTHON_INSTALLATIONS: +{% for installation in jenkins_common_python_installations %} + - PYTHON_ALIAS: '{{ installation.PYTHON_ALIAS }}' + PYTHON_PATH: '{{ installation.PYTHON_PATH }}' + PYTHON_PROPERTIES: [] +{% for property in installation.PYTHON_PROPERTIES %} + - '{{ property }}' +{% endfor %} +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/saml_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/saml_config.yml.j2 new file mode 100644 index 00000000000..8f9d75daaba --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/saml_config.yml.j2 @@ -0,0 +1,34 @@ +--- +IDP_METADATA: '{{ JENKINS_SAML_IDP_METADATA }}' +DISPLAY_NAME_ATTRIBUTE: '{{ JENKINS_SAML_DISPLAY_NAME_ATTRIBUTE }}' +GROUP_ATTRIBUTE: '{{ JENKINS_SAML_GROUP_ATTRIBUTE }}' +MAX_AUTH_LIFETIME_SECONDS: '{{ JENKINS_SAML_MAX_AUTH_LIFETIME_SECONDS }}' +USERNAME_ATTRIBUTE: '{{ JENKINS_SAML_USERNAME_ATTRIBUTE }}' +EMAIL_ATTRIBUTE: '{{ JENKINS_SAML_EMAIL_ATTRIBUTE }}' +LOGOUT_URL: '{{
JENKINS_SAML_LOGOUT_URL }}' +{% if JENKINS_SAML_ADVANCED_CONFIGURATION %} +ADVANCED_CONFIGURATION: + FORCE_AUTH: {{ JENKINS_SAML_ADVANCED_CONFIGURATION.FORCE_AUTH }} + CONTEXT_CLASS_REF: '{{ JENKINS_SAML_ADVANCED_CONFIGURATION.CONTEXT_CLASS_REF }}' + ENTITY_ID: '{{ JENKINS_SAML_ADVANCED_CONFIGURATION.ENTITY_ID }}' + MAXIMUM_SESSION_LIFETIME: {{ JENKINS_SAML_ADVANCED_CONFIGURATION.MAXIMUM_SESSION_LIFETIME }} +{% else %} +ADVANCED_CONFIGURATION: {} +{% endif %} +{% if JENKINS_SAML_ENCRYPTION_DATA %} +ENCRYPTION_DATA: + KEY_STORE_PATH: '{{ JENKINS_SAML_ENCRYPTION_DATA.KEY_STORE_PATH }}' + KEY_STORE_PASSWORD: '{{ JENKINS_SAML_ENCRYPTION_DATA.KEY_STORE_PASSWORD }}' + PRIVATE_KEY_PASSWORD: '{{ JENKINS_SAML_ENCRYPTION_DATA.PRIVATE_KEY_PASSWORD }}' + PRIVATE_KEY_ALIAS: '{{ JENKINS_SAML_ENCRYPTION_DATA.PRIVATE_KEY_ALIAS }}' + FORCE_SIGN_REDIRECT_BINDING_AUTH_REQUEST: {{ JENKINS_SAML_ENCRYPTION_DATA.FORCE_SIGN_REDIRECT_BINDING_AUTH_REQUEST }} +{% else %} +ENCRYPTION_DATA: {} +{% endif %} +USERNAME_CASE_CONVENTION: '{{ JENKINS_SAML_USERNAME_CASE_CONVENTION }}' +BINDING: '{{ JENKINS_SAML_BINDING }}' +SAML_CUSTOM_ATTRIBUTES: +{% for attribute in JENKINS_SAML_CUSTOM_ATTRIBUTES %} + - ATTRIBUTE_NAME: '{{ attribute.ATTRIBUTE_NAME }}' + ATTRIBUTE_VALUE: '{{ attribute.ATTRIBUTE_VALUE }}' +{% endfor %} diff --git a/playbooks/roles/jenkins_common/templates/config/security.yml.j2 b/playbooks/roles/jenkins_common/templates/config/security.yml.j2 index c90dca2c6e6..855c0c76abd 100644 --- a/playbooks/roles/jenkins_common/templates/config/security.yml.j2 +++ b/playbooks/roles/jenkins_common/templates/config/security.yml.j2 @@ -18,3 +18,5 @@ SECURITY_GROUPS: {% endfor %} {% endfor %} DSL_SCRIPT_SECURITY_ENABLED: {{ jenkins_common_dsl_script_security_enabled }} +CSRF_PROTECTION_ENABLED: {{ JENKINS_CSRF_PROTECTION_ENABLED }} +CSRF_PROXY_COMPATIBILITY: {{ JENKINS_CSRF_PROXY_COMPATIBILITY }} diff --git a/playbooks/roles/jenkins_common/templates/config/slack_config.yml.j2 
b/playbooks/roles/jenkins_common/templates/config/slack_config.yml.j2 new file mode 100644 index 00000000000..8ae6c9097d1 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/slack_config.yml.j2 @@ -0,0 +1,8 @@ +--- +SLACK_BASE_URL: '{{ JENKINS_SLACK_BASE_URL }}' +IS_SLACK_BOT: '{{ JENKINS_IS_SLACK_BOT }}' +SLACK_ROOM: '{{ JENKINS_SLACK_ROOM }}' +SLACK_TEAM_DOMAIN: '{{ JENKINS_SLACK_TEAM_DOMAIN }}' +# The following must be an id of a credential created in +# src/main/groovy/3importCredentials.groovy +SLACK_CREDENTIAL_ID: '{{ JENKINS_SLACK_CREDENTIAL_ID }}' diff --git a/playbooks/roles/jenkins_common/templates/config/timestamper_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/timestamper_config.yml.j2 new file mode 100644 index 00000000000..c1aff3b42d9 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/timestamper_config.yml.j2 @@ -0,0 +1,4 @@ +--- +SYSTEM_CLOCK_FORMAT: "{{ jenkins_common_timestamper_system_clock_format }}" +ELAPSED_TIME_FORMAT: "{{ jenkins_common_timestamper_elapsed_time_format }}" +ENABLED_ON_PIPELINES: {{ jenkins_common_timestamper_enabled_on_pipelines }} diff --git a/playbooks/roles/jenkins_common/templates/config/user_config.yml.j2 b/playbooks/roles/jenkins_common/templates/config/user_config.yml.j2 new file mode 100644 index 00000000000..3ff73bb0283 --- /dev/null +++ b/playbooks/roles/jenkins_common/templates/config/user_config.yml.j2 @@ -0,0 +1,6 @@ +--- +{% for user in JENKINS_USER_LIST %} +- USERNAME: '{{ user.USERNAME }}' + PASSWORD: '{{ user.PASSWORD }}' + EMAIL_ADDRESS: '{{ user.EMAIL_ADDRESS }}' +{% endfor %} diff --git a/playbooks/roles/jenkins_de/defaults/main.yml b/playbooks/roles/jenkins_data_engineering/defaults/main.yml similarity index 61% rename from playbooks/roles/jenkins_de/defaults/main.yml rename to playbooks/roles/jenkins_data_engineering/defaults/main.yml index af0892adbb0..e04b79dfb09 100644 --- a/playbooks/roles/jenkins_de/defaults/main.yml +++ 
b/playbooks/roles/jenkins_data_engineering/defaults/main.yml @@ -1,78 +1,56 @@ -de_jenkins_user_uid: 1002 -de_jenkins_group_gid: 1004 -de_jenkins_version: jenkins-2.73.2 -de_jenkins_common_war_source: https://edx-analytics-public.s3.amazonaws.com/packages -de_jenkins_jvm_args: '-Djava.awt.headless=true -Xmx8192m -Djenkins.install.runSetupWizard=false' -de_jenkins_configuration_scripts: - - 1addJarsToClasspath.groovy - - 2checkInstalledPlugins.groovy - - 3importCredentials.groovy - - 3setGlobalProperties.groovy - - 3shutdownCLI.groovy - - 4configureGit.groovy - - 4configureJobConfigHistory.groovy - - 4configureMailerPlugin.groovy - - 4configureMaskPasswords.groovy - - 5createLoggers.groovy +--- +# See README.rst for variable descriptions + +jenkins_home: '/var/lib/jenkins' + +JENKINS_SERVER_NAME: 'jenkins.analytics.edx.org' +jenkins_user: 'jenkins' +jenkins_group: 'jenkins' +jenkins_user_home: '/home/{{ jenkins_user }}' +jenkins_port: 8080 +jenkins_nginx_port: 80 +jenkins_protocol_https: true +AUTOMATION_USER: 'edx-analytics-automation' +jenkins_host_name: "{{ JENKINS_SERVER_NAME | default('jenkins') }}" + +JENKINS_VOLUMES: [] + +de_jenkins_seed_name: 'data_engineering_seed_job' +de_jenkins_seed_path: '{{ jenkins_common_config_path }}/xml/seed_job.xml' +de_jenkins_seed_job_source: '{{ role_path }}/../jenkins_data_engineering/files/xml/seed_job.xml' + +de_jenkins_user_uid: 900 +de_jenkins_group_gid: 900 +DE_JENKINS_VERSION: jenkins_2.89.4 +de_jenkins_version: "{{ DE_JENKINS_VERSION }}" +de_jenkins_jvm_args: '-Djava.awt.headless=true -Xmx16g -Djenkins.install.runSetupWizard=false' + +jenkins_base_environment_variables: + - NAME: 'AUTOMATION_USER' + VALUE: '{{ AUTOMATION_USER }}' + - NAME: 'PAGER_NOTIFY' + VALUE: '{{ PAGER_NOTIFY }}' + +JENKINS_DATA_ENGINEERING_EXTRA_PKGS: + - gettext + - pkg-config + - libsqlite3-dev + - libgeos-dev + - libmysqlclient-dev + - libgraphviz-dev + - libxmlsec1-dev + - zip # plugins de_jenkins_plugins_list: - - name: 
'antisamy-markup-formatter' - version: '1.3' - group: 'org.jenkins-ci.plugins' - - name: 'script-security' - version: '1.27' - group: 'org.jenkins-ci.plugins' - - name: 'mailer' - version: '1.16' - group: 'org.jenkins-ci.plugins' - - name: 'cvs' - version: '2.12' - group: 'org.jenkins-ci.plugins' - - name: 'ldap' - version: '1.11' + - name: 'ansicolor' + version: '0.5.2' group: 'org.jenkins-ci.plugins' - name: 'ant' - version: '1.2' - group: 'org.jenkins-ci.plugins' - - name: 'matrix-auth' - version: '1.2' - group: 'org.jenkins-ci.plugins' - - name: 'matrix-project' - version: '1.4.1' - group: 'org.jenkins-ci.plugins' - - name: 'credentials' - version: '1.24' + version: '1.8' group: 'org.jenkins-ci.plugins' - - name: 'ssh-credentials' - version: '1.11' - group: 'org.jenkins-ci.plugins' - - name: 'external-monitor-job' - version: '1.4' - group: 'org.jenkins-ci.plugins' - - name: 'translation' - version: '1.12' - group: 'org.jenkins-ci.plugins' - - name: 'subversion' - version: '2.4.5' - group: 'org.jenkins-ci.plugins' - - name: 'junit' - version: '1.3' - group: 'org.jenkins-ci.plugins' - - name: 'pam-auth' - version: '1.2' - group: 'org.jenkins-ci.plugins' - - name: 'maven-plugin' - version: '2.8' - group: 'org.jenkins-ci.main' - - name: 'ssh-slaves' - version: '1.9' - group: 'org.jenkins-ci.plugins' - - name: 'javadoc' - version: '1.3' - group: 'org.jenkins-ci.plugins' - - name: 'ansicolor' - version: '0.4.1' + - name: 'antisamy-markup-formatter' + version: '1.5' group: 'org.jenkins-ci.plugins' - name: 'buildgraph-view' version: '1.1.1' @@ -81,32 +59,59 @@ de_jenkins_plugins_list: version: '1.3' group: 'org.jenkins-ci.plugins' - name: 'build-timeout' - version: '1.14.1' + version: '1.19' group: 'org.jenkins-ci.plugins' - name: 'build-user-vars-plugin' version: '1.5' group: 'org.jenkins-ci.plugins' - name: 'cobertura' - version: '1.9.6' + version: '1.12.1' group: 'org.jenkins-ci.plugins' - name: 'copyartifact' - version: '1.32.1' + version: '1.39' + group: 
'org.jenkins-ci.plugins' + - name: 'credentials' + version: '2.1.18' group: 'org.jenkins-ci.plugins' - name: 'credentials-binding' - version: '1.7' + version: '1.15' + group: 'org.jenkins-ci.plugins' + - name: 'cvs' + version: '2.14' group: 'org.jenkins-ci.plugins' - name: 'ec2' version: '1.28' group: 'org.jenkins-ci.plugins' + - name: 'email-ext' + version: '2.62' + group: 'org.jenkins-ci.plugins' - name: 'envinject' version: '1.92.1' group: 'org.jenkins-ci.plugins' - name: 'exclusive-execution' version: '0.8' group: 'org.jenkins-ci.plugins' + - name: 'external-monitor-job' + version: '1.4' + group: 'org.jenkins-ci.plugins' - name: 'flexible-publish' version: '0.15.2' group: 'org.jenkins-ci.plugins' + - name: 'git' + version: '3.4.0' + group: 'org.jenkins-ci.plugins' + - name: 'git-client' + version: '2.7.2' + group: 'org.jenkins-ci.plugins' + - name: 'github' + version: '1.29.1' + group: 'com.coravy.hudson.plugins.github' + - name: 'github-api' + version: '1.90' + group: 'org.jenkins-ci.plugins' + - name: 'github-oauth' + version: '0.29' + group: 'org.jenkins-ci.plugins' - name: 'gradle' version: '1.24' group: 'org.jenkins-ci.plugins' @@ -116,21 +121,39 @@ de_jenkins_plugins_list: - name: 'groovy-postbuild' version: '2.2' group: 'org.jvnet.hudson.plugins' - - name: 'hockeyapp' - version: '1.2.1' - group: 'org.jenkins-ci.plugins' - name: 'htmlpublisher' version: '1.10' group: 'org.jenkins-ci.plugins' + - name: 'javadoc' + version: '1.3' + group: 'org.jenkins-ci.plugins' - name: 'jobConfigHistory' - version: '2.10' + version: '2.18' group: 'org.jenkins-ci.plugins' - name: 'job-dsl' - version: '1.45' + version: '1.67' + group: 'org.jenkins-ci.plugins' + - name: 'junit' + version: '1.24' + group: 'org.jenkins-ci.plugins' + - name: 'ldap' + version: '1.11' + group: 'org.jenkins-ci.plugins' + - name: 'mailer' + version: '1.18' group: 'org.jenkins-ci.plugins' - name: 'mask-passwords' version: '2.8' group: 'org.jenkins-ci.plugins' + - name: 'matrix-auth' + version: '1.2' 
+ group: 'org.jenkins-ci.plugins' + - name: 'matrix-project' + version: '1.13' + group: 'org.jenkins-ci.plugins' + - name: 'maven-plugin' + version: '3.1.2' + group: 'org.jenkins-ci.main' - name: 'monitoring' version: '1.56.0' group: 'org.jvnet.hudson.plugins' @@ -140,8 +163,11 @@ de_jenkins_plugins_list: - name: 'nodelabelparameter' version: '1.7.2' group: 'org.jenkins-ci.plugins' + - name: 'pam-auth' + version: '1.2' + group: 'org.jenkins-ci.plugins' - name: 'parameterized-trigger' - version: '2.25' + version: '2.34' group: 'org.jenkins-ci.plugins' - name: 'PrioritySorter' version: '2.9' @@ -149,30 +175,57 @@ de_jenkins_plugins_list: - name: 'rebuild' version: '1.25' group: 'com.sonyericsson.hudson.plugins.rebuild' + - name: 'reverse-proxy-auth-plugin' + version: '1.5' + group: 'org.jenkins-ci.plugins' - name: 'run-condition' version: '1.0' group: 'org.jenkins-ci.plugins' - name: 'shiningpanda' - version: '0.21' + version: '0.23' + group: 'org.jenkins-ci.plugins' + - name: 'script-security' + version: '1.44' group: 'org.jenkins-ci.plugins' - name: 'ssh-agent' version: '1.5' group: 'org.jenkins-ci.plugins' + - name: 'ssh-credentials' + version: '1.14' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-slaves' + version: '1.9' + group: 'org.jenkins-ci.plugins' + - name: 'subversion' + version: '2.5.7' + group: 'org.jenkins-ci.plugins' - name: 'text-finder' version: '1.10' group: 'org.jenkins-ci.plugins' + - name: 'throttle-concurrents' + version: '2.0.1' + group: 'org.jenkins-ci.plugins' - name: 'timestamper' version: '1.5.15' group: 'org.jenkins-ci.plugins' + - name: 'token-macro' + version: '2.3' + group: 'org.jenkins-ci.plugins' + - name: 'translation' + version: '1.12' + group: 'org.jenkins-ci.plugins' - name: 'violations' version: '0.7.11' group: 'org.jenkins-ci.plugins' + - name: 'workflow-job' + version: '2.11' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-step-api' + version: '2.12' + group: 'org.jenkins-ci.plugins.workflow' - name: 'xunit' 
version: '1.93' group: 'org.jenkins-ci.plugins' - - name: 'reverse-proxy-auth-plugin' - version: '1.5' - group: 'org.jenkins-ci.plugins' # ghprb de_jenkins_ghprb_white_list_phrase: '.*[Aa]dd\W+to\W+whitelist.*' @@ -184,18 +237,13 @@ de_jenkins_ghprb_cron_schedule: 'H/5 * * * *' # github JENKINS_GITHUB_CONFIG: '' -# hipchat -de_jenkins_hipchat_room: 'Data Engineering' - # ec2 -de_jenkins_instance_cap: '250' +de_jenkins_instance_cap: '500' -# seed -de_jenkins_seed_name: 'manually_seed_one_job' +JENKINS_DATA_ENGINEERING_CONCURRENT_JOBS_COUNT: 30 -# logs -de_jenkins_log_list: {} +jenkins_connection_retries: 240 +jenkins_connection_delay: 1 -# job config history -de_jenkins_history_max_days: '15' -de_jenkins_history_exclude_pattern: 'queue|nodeMonitors|UpdateCenter|global-build-stats|GhprbTrigger' +jenkins_private_keyfile: "{{ jenkins_user_home }}/.ssh/id_rsa" +jenkins_public_keyfile: "{{ jenkins_private_keyfile }}.pub" diff --git a/playbooks/roles/jenkins_data_engineering/files/xml/seed_job.xml b/playbooks/roles/jenkins_data_engineering/files/xml/seed_job.xml new file mode 100644 index 00000000000..916925cd64d --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering/files/xml/seed_job.xml @@ -0,0 +1,133 @@ + + + + Run createJobs script to seed all dsl jobs. + false + + + + -1 + 20 + -1 + -1 + + + + false + -1 + + + false + false + + + + + DSL_SCRIPT + Path to dsl script to run, from the root of the https://github.com/edx/jenkins-job-dsl repo (i.e. sample/jobs/sampleJob.groovy) + {{ JENKINS_JOB_DSL_SCRIPT_NAME }} + + + JOB_DSL_BRANCH + Branch of jenkins-job-dsl repo to use + {{ JENKINS_JOB_DSL_REPO_BRANCH }} + + + SECURE_BRANCH + Branch of the secure repo to use + {{ JENKINS_JOB_DSL_SECURE_BRANCH }} + + + COMMON_VARS_DIR + Base path for job specific configurations. 
+ analytics-secure-config/job-configs/ + + + + + + + 2 + + + https://github.com/edx/jenkins-job-dsl.git + + + + + ${JOB_DSL_BRANCH} + + + false + + + + + 2 + + + {{ JENKINS_JOB_DSL_SECURE_REPO_SSH }} + 1 + + + + + $SECURE_BRANCH + + + false + + + + analytics-secure-config + + + + + + + + false + false + false + false + + false + + + #!/usr/bin/env bash + # exit if user-supplied parameter does not exist + if [ ! -e "${DSL_SCRIPT}" ]; then + echo "DSL Script '${DSL_SCRIPT}' does not exist. Please try again" + exit 1 + fi + + + + tert + + libs +assemble + + + (Default) + true + true + true + true + + + ${DSL_SCRIPT} + false + false + IGNORE + IGNORE + JENKINS_ROOT + lib/snakeyaml-1.17.jar +src/main/groovy + + + + + diff --git a/playbooks/roles/jenkins_data_engineering/meta/main.yml b/playbooks/roles/jenkins_data_engineering/meta/main.yml new file mode 100644 index 00000000000..91991dc67c1 --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering/meta/main.yml @@ -0,0 +1,85 @@ +--- +dependencies: + - common + - role: jenkins_common + jenkins_node_usage: 'NORMAL' + jenkins_common_version: '{{ de_jenkins_version }}' + jenkins_common_user_uid: '{{ de_jenkins_user_uid }}' + jenkins_common_group_gid: '{{ de_jenkins_group_gid }}' + jenkins_common_jvm_args: '{{ de_jenkins_jvm_args }}' + jenkins_common_main_labels: + - master + jenkins_common_configuration_scripts: + - 1addJarsToClasspath.groovy + - 2checkInstalledPlugins.groovy + - 3importCredentials.groovy + - 3mainConfiguration.groovy + - 3setGlobalProperties.groovy + - 3shutdownCLI.groovy + - 4configureGHOAuth.groovy + - 4configureGithub.groovy + - 4configureJobConfigHistory.groovy + - 4configureMailerPlugin.groovy + - 4configureMaskPasswords.groovy + - 4configureSecurity.groovy + - 5createLoggers.groovy + - 5addSeedJob.groovy + - 5configureEmailExtension.groovy + jenkins_common_plugins_list: '{{ de_jenkins_plugins_list }}' + jenkins_common_ghprb_white_list_phrase: '{{ de_jenkins_ghprb_white_list_phrase }}' + 
jenkins_common_ghprb_ok_phrase: '{{ de_jenkins_ghprb_ok_phrase }}' + jenkins_common_ghprb_retest_phrase: '{{ de_jenkins_ghprb_retest_phrase }}' + jenkins_common_ghprb_skip_phrase: '{{ de_jenkins_ghprb_skip_phrase }}' + jenkins_common_ghprb_cron_schedule: '{{ de_jenkins_ghprb_cron_schedule }}' + jenkins_common_github_configs: '{{ JENKINS_GITHUB_CONFIG }}' + jenkins_common_instance_cap: '{{ de_jenkins_instance_cap }}' + jenkins_common_seed_name: '{{ de_jenkins_seed_name }}' + jenkins_common_seed_path: '{{ de_jenkins_seed_path }}' + jenkins_common_protocol_https: false + jenkins_common_server_name: '{{ JENKINS_SERVER_NAME }}' + AUTOMATION_PRIVATE_KEY_SOURCE_PATH: null + jenkins_common_main_num_executors: '{{ JENKINS_DATA_ENGINEERING_CONCURRENT_JOBS_COUNT }}' + jenkins_common_jenkins_configuration_branch: '{{ JENKINS_CONFIGURATION_REPO_BRANCH }}' + jenkins_common_seed_job_source: '{{ de_jenkins_seed_job_source }}' + jenkins_common_dsl_script_security_enabled: false + jenkins_common_email_replyto: '{{ JENKINS_MAILER_REPLY_TO_ADDRESS }}' + JENKINS_SECURITY_GROUPS: + - NAME: 'Administrator' + PERMISSIONS: + - com.cloudbees.plugins.credentials.CredentialsProvider.Create + - com.cloudbees.plugins.credentials.CredentialsProvider.Delete + - com.cloudbees.plugins.credentials.CredentialsProvider.ManageDomains + - com.cloudbees.plugins.credentials.CredentialsProvider.Update + - com.cloudbees.plugins.credentials.CredentialsProvider.View + - hudson.model.Computer.Build + - hudson.model.Computer.Configure + - hudson.model.Computer.Connect + - hudson.model.Computer.Create + - hudson.model.Computer.Delete + - hudson.model.Computer.Disconnect + - hudson.model.Hudson.Administer + - hudson.model.Hudson.ConfigureUpdateCenter + - hudson.model.Hudson.Read + - hudson.model.Hudson.RunScripts + - hudson.model.Hudson.UploadPlugins + - hudson.model.Item.Build + - hudson.model.Item.Cancel + - hudson.model.Item.Configure + - hudson.model.Item.Create + - hudson.model.Item.Delete + - 
hudson.model.Item.Discover + - hudson.model.Item.Move + - hudson.model.Item.Read + - hudson.model.Item.Workspace + - hudson.model.Run.Delete + - hudson.model.Run.Replay + - hudson.model.Run.Update + - hudson.model.View.Configure + - hudson.model.View.Create + - hudson.model.View.Delete + - hudson.model.View.Read + - hudson.scm.SCM.Tag + USERS: '{{ JENKINS_DATA_ENGINEERING_AUTH_ADMINISTRATORS }}' + jenkins_common_main_env_vars: '{{ jenkins_base_environment_variables }} + {{ jenkins_additional_environment_variables }}' + + - role: mongo_client diff --git a/playbooks/roles/jenkins_data_engineering/tasks/main.yml b/playbooks/roles/jenkins_data_engineering/tasks/main.yml new file mode 100644 index 00000000000..93339b6480d --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering/tasks/main.yml @@ -0,0 +1,118 @@ +--- +# Tasks for role jenkins_data_engineering +# +# Overview: +# +# This role sets up a Jenkins Instance for analytics tasks. + +- name: Setting the hostname + hostname: + name: "{{ jenkins_host_name }}" + +- name: install jenkins analytics extra system packages + apt: + pkg={{ item }} state=present update_cache=yes + with_items: "{{ JENKINS_DATA_ENGINEERING_EXTRA_PKGS }}" + tags: + - jenkins + +- name: Create /edx/var/edxapp dir + file: + path: "/edx/var/edxapp" + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0755 + tags: + - jenkins-edxapp + +- name: create directory + file: + path: "/home/{{ jenkins_user }}/.ssh" + state: directory + owner: '{{ jenkins_user }}' + group: '{{ jenkins_group }}' + tags: + - jenkins-auth + +- copy: + src: '{{ JENKINS_DATA_ENGINEERING_AUTOMATION_PRIVATE_KEY_SOURCE_PATH }}' + dest: '{{ jenkins_private_keyfile }}' + owner: '{{ jenkins_user }}' + group: '{{ jenkins_group }}' + mode: 0600 + tags: + - jenkins-auth + +- copy: + src: '{{ JENKINS_DATA_ENGINEERING_AUTOMATION_PUBLIC_KEY_SOURCE_PATH }}' + dest: '{{ jenkins_public_keyfile }}' + owner: '{{ jenkins_user }}' + group: '{{ jenkins_group 
}}' + mode: 0600 + tags: + - jenkins-auth + +- name: jenkins user config dir + file: + name: "{{ jenkins_home }}/users/{{ jenkins_user }}" + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + tags: + - jenkins-auth + +- name: template jenkins user config.xml + template: + src: jenkins.user.config.xml + dest: "{{ jenkins_home }}/users/{{ jenkins_user }}/config.xml" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + force: no # don't overwrite if already there + tags: + - jenkins-auth + +- name: fetch jenkins user public key + shell: "cat {{ jenkins_public_keyfile }}" + register: jenkins_public_key + tags: + - jenkins-auth + +- name: add jenkins user public key + lineinfile: + dest: "{{ jenkins_home }}/users/{{ jenkins_user }}/config.xml" + state: present + regexp: "^\\s*" + line: "{{ jenkins_public_key.stdout }}" + tags: + - jenkins-auth + +- name: Wait for Jenkins to start up before proceeding. + shell: "curl -D - --silent --max-time 5 {{ JENKINS_MAIN_URL }}cli/" + register: result + until: (result.stdout.find("403 Forbidden") != -1) or (result.stdout.find("200 OK") != -1) and (result.stdout.find("Please wait while") == -1) + retries: 60 + delay: 10 + changed_when: false + check_mode: no + tags: + - jenkins-auth + +- name: wipe initialization scripts from jenkins_commons + file: + path: '{{ jenkins_home }}/init.groovy.d/' + state: absent + tags: + - jenkins-auth + +- name: wipe initialization configuration files from jenkins_commons + file: + path: '{{ jenkins_home }}/init-configs/' + state: absent + tags: + - jenkins-auth + +- name: restart Jenkins + service: name=jenkins state=restarted + tags: + - jenkins-auth diff --git a/playbooks/roles/jenkins_data_engineering/templates/jenkins.user.config.xml b/playbooks/roles/jenkins_data_engineering/templates/jenkins.user.config.xml new file mode 100644 index 00000000000..0ec74ca3e0a --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering/templates/jenkins.user.config.xml @@ 
-0,0 +1,53 @@ + + + {{ jenkins_user }} + + + + + + + + + + + + + + + + + + + + + edx + shadow + jenkins + authenticated + + 1457073573763 + + + + + + + All + false + false + + + + + + + + + + + + false + + + diff --git a/playbooks/roles/jenkins_data_engineering_new/defaults/main.yml b/playbooks/roles/jenkins_data_engineering_new/defaults/main.yml new file mode 100644 index 00000000000..aa72105cf77 --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering_new/defaults/main.yml @@ -0,0 +1,255 @@ +--- +# See README.rst for variable descriptions + +jenkins_home: '/var/lib/jenkins' + +JENKINS_SERVER_NAME: 'jenkins-new.analytics.edx.org' +jenkins_user: 'jenkins' +jenkins_group: 'jenkins' +jenkins_user_home: '/home/{{ jenkins_user }}' +jenkins_port: 8080 +jenkins_nginx_port: 80 +jenkins_protocol_https: true +AUTOMATION_USER: 'edx-analytics-automation' +jenkins_host_name: "{{ JENKINS_SERVER_NAME | default('jenkins') }}" +# We should expect a 403 Forbidden from Jenkins during the init stage, +# as it should not be publicly available. 
+de_jenkins_ready_status_code: 403 + +JENKINS_VOLUMES: [] + +de_jenkins_seed_name: 'data_engineering_seed_job' +de_jenkins_seed_path: '{{ jenkins_common_config_path }}/xml/seed_job.xml' +de_jenkins_seed_job_source: '{{ role_path }}/../jenkins_data_engineering_new/files/xml/seed_job.xml' + +de_jenkins_user_uid: 900 +de_jenkins_group_gid: 900 +DE_JENKINS_VERSION: jenkins_2.222.3 +de_jenkins_version: "{{ DE_JENKINS_VERSION }}" +de_jenkins_jvm_args: '-Djava.awt.headless=true -Xmx16g -Djenkins.install.runSetupWizard=false' + +jenkins_base_environment_variables: + - NAME: 'AUTOMATION_USER' + VALUE: '{{ AUTOMATION_USER }}' + - NAME: 'PAGER_NOTIFY' + VALUE: '{{ PAGER_NOTIFY }}' + +JENKINS_DATA_ENGINEERING_EXTRA_PKGS: + - gettext + - pkg-config + - libsqlite3-dev + - libgeos-dev + - libmysqlclient-dev + - libgraphviz-dev + - libxmlsec1-dev + - zip + +# plugins +de_jenkins_plugins_list: + - name: 'ansicolor' + version: '0.5.2' + group: 'org.jenkins-ci.plugins' + - name: 'ant' + version: '1.8' + group: 'org.jenkins-ci.plugins' + - name: 'antisamy-markup-formatter' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'aws-credentials' + version: '1.24' + group: 'org.jenkins-ci.plugins' + - name: 'build-name-setter' + version: '1.3' + group: 'org.jenkins-ci.plugins' + - name: 'build-timeout' + version: '1.19' + group: 'org.jenkins-ci.plugins' + - name: 'build-user-vars-plugin' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'copyartifact' + version: '1.39' + group: 'org.jenkins-ci.plugins' + - name: 'credentials' + version: '2.3.0' + group: 'org.jenkins-ci.plugins' + - name: 'credentials-binding' + version: '1.15' + group: 'org.jenkins-ci.plugins' + - name: 'email-ext' + version: '2.66' + group: 'org.jenkins-ci.plugins' + - name: 'envinject' + version: '2.3.0' + group: 'org.jenkins-ci.plugins' + - name: 'exclusive-execution' + version: '0.8' + group: 'org.jenkins-ci.plugins' + - name: 'external-monitor-job' + version: '1.4' + group: 'org.jenkins-ci.plugins' 
+ - name: 'flexible-publish' + version: '0.15.2' + group: 'org.jenkins-ci.plugins' + - name: 'ghprb' + version: '1.42.1' + group: 'org.jenkins-ci.plugins' + - name: 'git' + version: '4.2.2' + group: 'org.jenkins-ci.plugins' + - name: 'git-client' + version: '3.0.0' + group: 'org.jenkins-ci.plugins' + - name: 'github' + version: '1.29.2' + group: 'com.coravy.hudson.plugins.github' + - name: 'github-api' + version: '1.111' + group: 'org.jenkins-ci.plugins' + - name: 'github-oauth' + version: '0.33' + group: 'org.jenkins-ci.plugins' + - name: 'gradle' + version: '1.29' + group: 'org.jenkins-ci.plugins' + - name: 'groovy' + version: '2.2' + group: 'org.jenkins-ci.plugins' + - name: 'groovy-postbuild' + version: '2.4' + group: 'org.jvnet.hudson.plugins' + - name: 'htmlpublisher' + version: '1.21' + group: 'org.jenkins-ci.plugins' + - name: 'javadoc' + version: '1.3' + group: 'org.jenkins-ci.plugins' + - name: 'jobConfigHistory' + version: '2.24' + group: 'org.jenkins-ci.plugins' + - name: 'job-dsl' + version: '1.77' + group: 'org.jenkins-ci.plugins' + - name: 'junit' + version: '1.28' + group: 'org.jenkins-ci.plugins' + - name: 'ldap' + version: '1.20' + group: 'org.jenkins-ci.plugins' + - name: 'mailer' + version: '1.23' + group: 'org.jenkins-ci.plugins' + - name: 'mask-passwords' + version: '2.13' + group: 'org.jenkins-ci.plugins' + - name: 'matrix-auth' + version: '2.3' + group: 'org.jenkins-ci.plugins' + - name: 'matrix-project' + version: '1.14' + group: 'org.jenkins-ci.plugins' + - name: 'maven-plugin' + version: '3.4' + group: 'org.jenkins-ci.main' + - name: 'monitoring' + version: '1.76.0' + group: 'org.jvnet.hudson.plugins' + - name: 'multiple-scms' + version: '0.6' + group: 'org.jenkins-ci.plugins' + - name: 'nodelabelparameter' + version: '1.7.2' + group: 'org.jenkins-ci.plugins' + - name: 'pam-auth' + version: '1.5.1' + group: 'org.jenkins-ci.plugins' + - name: 'parameterized-trigger' + version: '2.35.2' + group: 'org.jenkins-ci.plugins' + - name: 
'PrioritySorter' + version: '2.9' + group: 'org.jenkins-ci.plugins' + - name: 'rebuild' + version: '1.31' + group: 'com.sonyericsson.hudson.plugins.rebuild' + - name: 'reverse-proxy-auth-plugin' + version: '1.6.3' + group: 'org.jenkins-ci.plugins' + - name: 'run-condition' + version: '1.0' + group: 'org.jenkins-ci.plugins' + - name: 'shiningpanda' + version: '0.23' + group: 'org.jenkins-ci.plugins' + - name: 'script-security' + version: '1.71' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-agent' + version: '1.17' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-credentials' + version: '1.17.3' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-slaves' + version: '1.28.1' + group: 'org.jenkins-ci.plugins' + - name: 'subversion' + version: '2.13.1' + group: 'org.jenkins-ci.plugins' + - name: 'text-finder' + version: '1.10' + group: 'org.jenkins-ci.plugins' + - name: 'throttle-concurrents' + version: '2.0.1' + group: 'org.jenkins-ci.plugins' + - name: 'timestamper' + version: '1.11.2' + group: 'org.jenkins-ci.plugins' + - name: 'token-macro' + version: '2.10' + group: 'org.jenkins-ci.plugins' + - name: 'translation' + version: '1.16' + group: 'org.jenkins-ci.plugins' + - name: 'violations' + version: '0.7.11' + group: 'org.jenkins-ci.plugins' + - name: 'workflow-cps' + version: '2.80' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-cps-global-lib' + version: '2.15' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-durable-task-step' + version: '2.35' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-job' + version: '2.35' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-step-api' + version: '2.21' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'xunit' + version: '1.93' + group: 'org.jenkins-ci.plugins' + +# ghprb +de_jenkins_ghprb_white_list_phrase: '.*[Aa]dd\W+to\W+whitelist.*' +de_jenkins_ghprb_ok_phrase: '.*ok\W+to\W+test.*' +de_jenkins_ghprb_retest_phrase: '.*jenkins\W+run\W+all.*' 
+de_jenkins_ghprb_skip_phrase: '.*\[[Ss]kip\W+ci\].*' +de_jenkins_ghprb_cron_schedule: 'H/5 * * * *' + +# github +JENKINS_GITHUB_CONFIG: '' + +# ec2 +de_jenkins_instance_cap: '500' + +JENKINS_DATA_ENGINEERING_CONCURRENT_JOBS_COUNT: 30 + +jenkins_connection_retries: 240 +jenkins_connection_delay: 1 + +jenkins_private_keyfile: "{{ jenkins_user_home }}/.ssh/id_rsa" +jenkins_public_keyfile: "{{ jenkins_private_keyfile }}.pub" diff --git a/playbooks/roles/jenkins_data_engineering_new/files/xml/seed_job.xml b/playbooks/roles/jenkins_data_engineering_new/files/xml/seed_job.xml new file mode 100644 index 00000000000..cb450aa05f8 --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering_new/files/xml/seed_job.xml @@ -0,0 +1,132 @@ + + + + Run createJobs script to seed all dsl jobs. + false + + + + -1 + 20 + -1 + -1 + + + + false + -1 + + + false + false + + + + + DSL_SCRIPT + Path to dsl script to run, from the root of the https://github.com/edx/jenkins-job-dsl repo (i.e. sample/jobs/sampleJob.groovy) + {{ JENKINS_JOB_DSL_SCRIPT_NAME }} + + + JOB_DSL_BRANCH + Branch of jenkins-job-dsl repo to use + {{ JENKINS_JOB_DSL_REPO_BRANCH }} + + + SECURE_BRANCH + Branch of the secure repo to use + {{ JENKINS_JOB_DSL_SECURE_BRANCH }} + + + COMMON_VARS_DIR + Base path for job specific configurations. + analytics-secure-config/job-configs/ + + + + + + + + 2 + + + https://github.com/edx/jenkins-job-dsl.git + + + + + ${JOB_DSL_BRANCH} + + + false + + + + + 2 + + + {{ JENKINS_JOB_DSL_SECURE_REPO_SSH }} + 1 + + + + + $SECURE_BRANCH + + + false + + + + analytics-secure-config + + + + + + + + false + false + false + false + + false + + + #!/usr/bin/env bash + # exit if user-supplied parameter does not exist + if [ ! -e "${DSL_SCRIPT}" ]; then + echo "DSL Script '${DSL_SCRIPT}' does not exist. 
Please try again" + exit 1 + fi + + + + + libs +assemble + + + (Default) + true + true + true + true + + + ${DSL_SCRIPT} + false + false + IGNORE + IGNORE + JENKINS_ROOT + lib/snakeyaml-1.17.jar +src/main/groovy + + + + + diff --git a/playbooks/roles/jenkins_data_engineering_new/meta/main.yml b/playbooks/roles/jenkins_data_engineering_new/meta/main.yml new file mode 100644 index 00000000000..81b2ccf6577 --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering_new/meta/main.yml @@ -0,0 +1,49 @@ +--- +dependencies: + - common + - role: jenkins_common + jenkins_node_usage: 'NORMAL' + jenkins_common_version: '{{ de_jenkins_version }}' + jenkins_common_user_uid: '{{ de_jenkins_user_uid }}' + jenkins_common_group_gid: '{{ de_jenkins_group_gid }}' + jenkins_common_jvm_args: '{{ de_jenkins_jvm_args }}' + jenkins_common_main_labels: + - master + jenkins_common_configuration_scripts: + - 1addJarsToClasspath.groovy + - 2checkInstalledPlugins.groovy + - 3importCredentials.groovy + - 3mainConfiguration.groovy + - 3setGlobalProperties.groovy + - 4configureGHOAuth.groovy + - 4configureGHPRB.groovy + - 4configureGithub.groovy + - 4configureJobConfigHistory.groovy + - 4configureMailerPlugin.groovy + - 4configureMaskPasswords.groovy + - 4configureSecurity.groovy + - 5createLoggers.groovy + - 5addSeedJob.groovy + - 5configureEmailExtension.groovy + jenkins_common_plugins_list: '{{ de_jenkins_plugins_list }}' + jenkins_common_ghprb_white_list_phrase: '{{ de_jenkins_ghprb_white_list_phrase }}' + jenkins_common_ghprb_ok_phrase: '{{ de_jenkins_ghprb_ok_phrase }}' + jenkins_common_ghprb_retest_phrase: '{{ de_jenkins_ghprb_retest_phrase }}' + jenkins_common_ghprb_skip_phrase: '{{ de_jenkins_ghprb_skip_phrase }}' + jenkins_common_ghprb_cron_schedule: '{{ de_jenkins_ghprb_cron_schedule }}' + jenkins_common_github_configs: '{{ JENKINS_GITHUB_CONFIG }}' + jenkins_common_instance_cap: '{{ de_jenkins_instance_cap }}' + jenkins_common_seed_name: '{{ de_jenkins_seed_name }}' + 
jenkins_common_seed_path: '{{ de_jenkins_seed_path }}' + jenkins_common_protocol_https: false + jenkins_common_server_name: '{{ JENKINS_SERVER_NAME }}' + AUTOMATION_PRIVATE_KEY_SOURCE_PATH: null + jenkins_common_main_num_executors: '{{ JENKINS_DATA_ENGINEERING_CONCURRENT_JOBS_COUNT }}' + jenkins_common_jenkins_configuration_branch: '{{ JENKINS_CONFIGURATION_REPO_BRANCH }}' + jenkins_common_seed_job_source: '{{ de_jenkins_seed_job_source }}' + jenkins_common_dsl_script_security_enabled: false + jenkins_common_email_replyto: '{{ JENKINS_MAILER_REPLY_TO_ADDRESS }}' + jenkins_common_main_env_vars: '{{ jenkins_base_environment_variables }} + {{ jenkins_additional_environment_variables }}' + jenkins_common_ready_status_code: '{{ de_jenkins_ready_status_code }}' + + - role: mongo_client diff --git a/playbooks/roles/jenkins_data_engineering_new/tasks/main.yml b/playbooks/roles/jenkins_data_engineering_new/tasks/main.yml new file mode 100644 index 00000000000..568a7a0826d --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering_new/tasks/main.yml @@ -0,0 +1,120 @@ +--- +# Tasks for role jenkins_data_engineering +# +# Overview: +# +# This role sets up a Jenkins Instance for analytics tasks. 
+ +- name: Setting the hostname + hostname: + name: "{{ jenkins_host_name }}" + +- name: install jenkins analytics extra system packages + apt: + pkg={{ item }} state=present update_cache=yes + with_items: "{{ JENKINS_DATA_ENGINEERING_EXTRA_PKGS }}" + tags: + - jenkins + +- name: Create /edx/var/edxapp dir + file: + path: "/edx/var/edxapp" + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: 0755 + tags: + - jenkins-edxapp + +- name: create ssh directory for jenkins user + file: + path: "/home/{{ jenkins_user }}/.ssh" + state: directory + owner: '{{ jenkins_user }}' + group: '{{ jenkins_group }}' + tags: + - jenkins-auth + +- name: add jenkins private key + copy: + src: '{{ JENKINS_DATA_ENGINEERING_AUTOMATION_PRIVATE_KEY_SOURCE_PATH }}' + dest: '{{ jenkins_private_keyfile }}' + owner: '{{ jenkins_user }}' + group: '{{ jenkins_group }}' + mode: 0600 + tags: + - jenkins-auth + +- name: add jenkins public key + copy: + src: '{{ JENKINS_DATA_ENGINEERING_AUTOMATION_PUBLIC_KEY_SOURCE_PATH }}' + dest: '{{ jenkins_public_keyfile }}' + owner: '{{ jenkins_user }}' + group: '{{ jenkins_group }}' + mode: 0600 + tags: + - jenkins-auth + +- name: create jenkins user config dir + file: + name: "{{ jenkins_home }}/users/{{ jenkins_user }}" + state: directory + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + tags: + - jenkins-auth + +- name: template jenkins user config.xml + template: + src: jenkins.user.config.xml + dest: "{{ jenkins_home }}/users/{{ jenkins_user }}/config.xml" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + force: no # don't overwrite if already there + tags: + - jenkins-auth + +- name: fetch jenkins user public key + shell: "cat {{ jenkins_public_keyfile }}" + register: jenkins_public_key + tags: + - jenkins-auth + +- name: add jenkins user public key + lineinfile: + dest: "{{ jenkins_home }}/users/{{ jenkins_user }}/config.xml" + state: present + regexp: "^\\s*" + line: "{{ 
jenkins_public_key.stdout }}" + tags: + - jenkins-auth + +- name: Wait for Jenkins to start up before proceeding. + shell: "curl -D - --silent --max-time 5 {{ JENKINS_MAIN_URL }}cli/" + register: result + until: (result.stdout.find("403 Forbidden") != -1) or (result.stdout.find("200 OK") != -1) and (result.stdout.find("Please wait while") == -1) + retries: 60 + delay: 10 + changed_when: false + check_mode: no + tags: + - jenkins-auth + +- name: wipe initialization scripts from jenkins_commons + file: + path: '{{ jenkins_home }}/init.groovy.d/' + state: absent + tags: + - jenkins-auth + +- name: wipe initialization configuration files from jenkins_commons + file: + path: '{{ jenkins_home }}/init-configs/' + state: absent + tags: + - jenkins-auth + +- name: restart Jenkins + service: name=jenkins state=restarted + tags: + - jenkins-auth diff --git a/playbooks/roles/jenkins_data_engineering_new/templates/jenkins.user.config.xml b/playbooks/roles/jenkins_data_engineering_new/templates/jenkins.user.config.xml new file mode 100644 index 00000000000..1776428c6ab --- /dev/null +++ b/playbooks/roles/jenkins_data_engineering_new/templates/jenkins.user.config.xml @@ -0,0 +1,53 @@ + + + {{ jenkins_user }} + + + + + + + + + + + + + + + + + + + + + edx + shadow + jenkins + authenticated + + 1457073573763 + + + + + + + All + false + false + + + + + + + + + + + + false + + + diff --git a/playbooks/roles/jenkins_de/meta/main.yml b/playbooks/roles/jenkins_de/meta/main.yml deleted file mode 100644 index 16b2b250d08..00000000000 --- a/playbooks/roles/jenkins_de/meta/main.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -dependencies: - - common - - role: jenkins_common - JENKINS_SERVER_NAME: 'scheduler.analytics.edx.org' - - jenkins_common_version: '{{ de_jenkins_version }}' - jenkins_common_war_source: '{{ de_jenkins_common_war_source }}' - jenkins_common_user_uid: '{{ de_jenkins_user_uid }}' - jenkins_common_group_gid: '{{ de_jenkins_group_gid }}' - jenkins_common_jvm_args: '{{ 
de_jenkins_jvm_args }}' - jenkins_common_configuration_scripts: '{{ de_jenkins_configuration_scripts }}' - jenkins_common_template_files: '{{ de_jenkins_template_files }}' - jenkins_common_plugins_list: '{{ de_jenkins_plugins_list }}' - jenkins_common_ghprb_white_list_phrase: '{{ de_jenkins_ghprb_white_list_phrase }}' - jenkins_common_ghprb_ok_phrase: '{{ de_jenkins_ghprb_ok_phrase }}' - jenkins_common_ghprb_retest_phrase: '{{ de_jenkins_ghprb_retest_phrase }}' - jenkins_common_ghprb_skip_phrase: '{{ de_jenkins_ghprb_skip_phrase }}' - jenkins_common_ghprb_cron_schedule: '{{ de_jenkins_ghprb_cron_schedule }}' - jenkins_common_github_configs: '{{ JENKINS_GITHUB_CONFIG }}' - jenkins_common_hipchat_room: '{{ de_jenkins_hipchat_room }}' - jenkins_common_instance_cap: '{{ de_jenkins_instance_cap }}' - jenkins_common_seed_name: '{{ de_jenkins_seed_name }}' - jenkins_common_log_list: '{{ de_jenkins_log_list }}' - jenkins_common_history_max_days: '{{ de_jenkins_history_max_days }}' - jenkins_common_history_exclude_pattern: '{{ de_jenkins_history_exclude_pattern }}' - jenkins_common_server_name: '{{ JENKINS_SERVER_NAME }}' - JENKINS_MAIN_GITHUB_OWNER_WHITELIST: '' diff --git a/playbooks/roles/jenkins_it/defaults/main.yml b/playbooks/roles/jenkins_it/defaults/main.yml new file mode 100644 index 00000000000..3606ad9bef9 --- /dev/null +++ b/playbooks/roles/jenkins_it/defaults/main.yml @@ -0,0 +1,310 @@ +it_jenkins_user_uid: 1002 +it_jenkins_group_gid: 1004 +it_jenkins_groups: 'jenkins,docker' +IT_JENKINS_VERSION: jenkins_2.150.2 +it_jenkins_version: "{{ IT_JENKINS_VERSION }}" +it_jenkins_jvm_args: '-Djava.awt.headless=true -Xmx16384m -DsessionTimeout=60' +it_jenkins_main_num_executors: 5 + +oracle_path: '/opt/oracle' +IT_ORACLE_S3_PATH: '' +IT_ORACLE_INSTANT_CLIENT: '' +it_oracle_packages: + - libaio1 + +it_jenkins_python_versions: + - python3.5-dev + +it_jenkins_configuration_scripts: + - 1addJarsToClasspath.groovy + - 2checkInstalledPlugins.groovy + - 3addUsers.groovy + - 
3importCredentials.groovy + - 3installGroovy.groovy + - 3installPython.groovy + - 3mainConfiguration.groovy + - 3setGlobalProperties.groovy + - 3shutdownCLI.groovy + - 4configureGHPRB.groovy + - 4configureGit.groovy + - 4configureGithub.groovy + - 4configureMailerPlugin.groovy + - 4configureMaskPasswords.groovy + - 4configureSAML.groovy + - 4configureSecurity.groovy + - 4configureSlack.groovy + - 5createLoggers.groovy + +jenkins_it_non_plugin_template_files: + - user_config + - credentials + - email_ext_config + - ghprb_config + - git_config + - github_config + - groovy_config + - job_config_history + - log_config + - mailer_config + - main_config + - mask_passwords_config + - properties_config + - python_config + - saml_config + - security + - seed_config + - slack_config + +it_jenkins_plugins_list: + - name: 'analysis-core' + version: '1.95' + group: 'org.jvnet.hudson.plugins' + - name: 'ansicolor' + version: '0.5.2' + group: 'org.jenkins-ci.plugins' + - name: 'ant' + version: '1.8' + group: 'org.jenkins-ci.plugins' + - name: 'antisamy-markup-formatter' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'aws-credentials' + version: '1.24' + group: 'org.jenkins-ci.plugins' + - name: 'aws-java-sdk' + version: '1.11.457' + group: 'org.jenkins-ci.plugins' + - name: 'badge' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'bouncycastle-api' + version: '2.17' + group: 'org.jenkins-ci.plugins' + - name: 'build-name-setter' + version: '1.3' + group: 'org.jenkins-ci.plugins' + - name: 'build-timeout' + version: '1.19' + group: 'org.jenkins-ci.plugins' + - name: 'build-user-vars-plugin' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'cobertura' + version: '1.12.1' + group: 'org.jenkins-ci.plugins' + - name: 'copyartifact' + version: '1.39' + group: 'org.jenkins-ci.plugins' + - name: 'credentials' + version: '2.1.18' + group: 'org.jenkins-ci.plugins' + - name: 'credentials-binding' + version: '1.15' + group: 'org.jenkins-ci.plugins' + - 
name: 'cvs' + version: '2.14' + group: 'org.jenkins-ci.plugins' + - name: 'docker-commons' + version: '1.8' + group: 'org.jenkins-ci.plugins' + - name: 'email-ext' + version: '2.62' + group: 'org.jenkins-ci.plugins' + - name: 'envinject' + version: '2.1.5' + group: 'org.jenkins-ci.plugins' + - name: 'exclusive-execution' + version: '0.8' + group: 'org.jenkins-ci.plugins' + - name: 'external-monitor-job' + version: '1.4' + group: 'org.jenkins-ci.plugins' + - name: 'ghprb' + version: '1.42.0' + group: 'org.jenkins-ci.plugins' + - name: 'git' + version: '3.9.3' + group: 'org.jenkins-ci.plugins' + - name: 'github' + version: '1.29.2' + group: 'com.coravy.hudson.plugins.github' + - name: 'github-api' + version: '1.90' + group: 'org.jenkins-ci.plugins' + - name: 'github-branch-source' + version: '2.3.6' + group: 'org.jenkins-ci.plugins' + - name: 'gradle' + version: '1.29' + group: 'org.jenkins-ci.plugins' + - name: 'groovy' + version: '2.1' + group: 'org.jenkins-ci.plugins' + - name: 'groovy-postbuild' + version: '2.4' + group: 'org.jvnet.hudson.plugins' + - name: 'htmlpublisher' + version: '1.16' + group: 'org.jenkins-ci.plugins' + - name: 'javadoc' + version: '1.3' + group: 'org.jenkins-ci.plugins' + - name: 'job-dsl' + version: '1.70' + group: 'org.jenkins-ci.plugins' + - name: 'junit' + version: '1.26' + group: 'org.jenkins-ci.plugins' + - name: 'mailer' + version: '1.21' + group: 'org.jenkins-ci.plugins' + - name: 'mask-passwords' + version: '2.10.1' + group: 'org.jenkins-ci.plugins' + - name: 'matrix-auth' + version: '1.5' + group: 'org.jenkins-ci.plugins' + - name: 'matrix-project' + version: '1.13' + group: 'org.jenkins-ci.plugins' + - name: 'maven-plugin' + version: '3.1.2' + group: 'org.jenkins-ci.main' + - name: 'monitoring' + version: '1.76.0' + group: 'org.jvnet.hudson.plugins' + - name: 'multiple-scms' + version: '0.6' + group: 'org.jenkins-ci.plugins' + - name: 'nodelabelparameter' + version: '1.7.2' + group: 'org.jenkins-ci.plugins' + - name: 'pam-auth' 
+ version: '1.4' + group: 'org.jenkins-ci.plugins' + - name: 'parameterized-trigger' + version: '2.35.2' + group: 'org.jenkins-ci.plugins' + - name: 'pipeline-model-definition' + version: '1.2.9' + group: 'org.jenkinsci.plugins' + - name: 'pipeline-build-step' + version: '2.5.1' + group: 'org.jenkins-ci.plugins' + - name: 'pipeline-utility-steps' + version: '2.0.2' + group: 'org.jenkins-ci.plugins' + - name: 'PrioritySorter' + version: '2.9' + group: 'org.jenkins-ci.plugins' + - name: 'rebuild' + version: '1.29' + group: 'com.sonyericsson.hudson.plugins.rebuild' + - name: 'resource-disposer' + version: '0.12' + group: 'org.jenkins-ci.plugins' + - name: 'run-condition' + version: '1.0' + group: 'org.jenkins-ci.plugins' + - name: 'saml' + version: '1.1.0' + group: 'org.jenkins-ci.plugins' + - name: 'script-security' + version: '1.53' + group: 'org.jenkins-ci.plugins' + - name: 'shiningpanda' + version: '0.23' + group: 'org.jenkins-ci.plugins' + - name: 'slack' + version: '2.2' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-agent' + version: '1.17' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-credentials' + version: '1.14' + group: 'org.jenkins-ci.plugins' + - name: 'ssh-slaves' + version: '1.28.1' + group: 'org.jenkins-ci.plugins' + - name: 'structs' + version: '1.17' + group: 'org.jenkins-ci.plugins' + - name: 'timestamper' + version: '1.8.9' + group: 'org.jenkins-ci.plugins' + - name: 'token-macro' + version: '2.6' + group: 'org.jenkins-ci.plugins' + - name: 'translation' + version: '1.16' + group: 'org.jenkins-ci.plugins' + - name: 'violations' + version: '0.7.11' + group: 'org.jenkins-ci.plugins' + - name: 'warnings' + version: '5.0.1' + group: 'org.jvnet.hudson.plugins' + - name: 'warnings-ng' + version: '2.2.1' + group: 'io.jenkins.plugins' + - name: 'workflow-aggregator' + version: '2.5' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-cps' + version: '2.46' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-durable-task-step' + 
version: '2.18' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'workflow-support' + version: '2.18' + group: 'org.jenkins-ci.plugins.workflow' + - name: 'ws-cleanup' + version: '0.34' + group: 'org.jenkins-ci.plugins' + - name: 'xunit' + version: '1.93' + group: 'org.jenkins-ci.plugins' + +# ghprb +it_jenkins_ghprb_white_list_phrase: '.*[Aa]dd\W+to\W+whitelist.*' +it_jenkins_ghprb_ok_phrase: '.*ok\W+to\W+test.*' +it_jenkins_ghprb_retest_phrase: '.*jenkins\W+run\W+all.*' +it_jenkins_ghprb_skip_phrase: '.*\[[Ss]kip\W+ci\].*' +it_jenkins_ghprb_cron_schedule: 'H/5 * * * *' + +# github +JENKINS_GITHUB_CONFIG: '' + +# seed +it_jenkins_seed_name: 'manually_seed_one_job' + +# logs +it_jenkins_log_list: + - LOG_RECORDER: 'Ghprb' + LOGGERS: + - name: 'org.jenkinsci.plugins.ghprb.GhprbPullRequest' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.ghprb.GhprbRootAction' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.ghprb.GhprbRepository' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.ghprb.GhprbGitHub' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.ghprb.Ghprb' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.ghprb.GhprbTrigger' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.ghprb.GhprbBuilds' + log_level: 'ALL' + - LOG_RECORDER: 'GithubPushLogs' + LOGGERS: + - name: 'com.cloudbees.jenkins.GitHubPushTrigger' + log_level: 'ALL' + - name: 'org.jenkinsci.plugins.github.webhook.WebhookManager' + log_level: 'ALL' + - name: 'com.cloudbees.jenkins.GitHubWebHook' + log_level: 'ALL' + - name: 'hudson.plugins.git.GitSCM' + log_level: 'ALL' diff --git a/playbooks/roles/jenkins_it/meta/main.yml b/playbooks/roles/jenkins_it/meta/main.yml new file mode 100644 index 00000000000..08900e21e6a --- /dev/null +++ b/playbooks/roles/jenkins_it/meta/main.yml @@ -0,0 +1,25 @@ +--- +dependencies: + - common + - role: jenkins_common + jenkins_common_version: '{{ it_jenkins_version }}' + jenkins_common_user_uid: '{{ it_jenkins_user_uid }}' + jenkins_common_group_gid: '{{ 
it_jenkins_group_gid }}' + jenkins_common_groups: '{{ it_jenkins_groups }}' + jenkins_common_jvm_args: '{{ it_jenkins_jvm_args }}' + jenkins_common_configuration_scripts: '{{ it_jenkins_configuration_scripts }}' + jenkins_common_template_files: '{{ it_jenkins_template_files }}' + jenkins_common_plugins_list: '{{ it_jenkins_plugins_list }}' + jenkins_common_ghprb_white_list_phrase: '{{ it_jenkins_ghprb_white_list_phrase }}' + jenkins_common_ghprb_ok_phrase: '{{ it_jenkins_ghprb_ok_phrase }}' + jenkins_common_ghprb_retest_phrase: '{{ it_jenkins_ghprb_retest_phrase }}' + jenkins_common_ghprb_skip_phrase: '{{ it_jenkins_ghprb_skip_phrase }}' + jenkins_common_ghprb_cron_schedule: '{{ it_jenkins_ghprb_cron_schedule }}' + jenkins_common_github_configs: '{{ JENKINS_GITHUB_CONFIG }}' + jenkins_common_seed_name: '{{ it_jenkins_seed_name }}' + jenkins_common_log_list: '{{ it_jenkins_log_list }}' + jenkins_common_server_name: '{{ JENKINS_SERVER_NAME }}' + jenkins_common_email_replyto: '{{ JENKINS_MAILER_REPLY_TO_ADDRESS }}' + jenkins_common_python_versions: '{{ it_jenkins_python_versions }}' + jenkins_common_non_plugin_template_files: '{{ jenkins_it_non_plugin_template_files }}' + jenkins_common_main_num_executors: '{{ it_jenkins_main_num_executors }}' diff --git a/playbooks/roles/jenkins_it/tasks/main.yml b/playbooks/roles/jenkins_it/tasks/main.yml new file mode 100644 index 00000000000..a2fbb3429a5 --- /dev/null +++ b/playbooks/roles/jenkins_it/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: make oracle directory for instant client + file: + path: "{{ oracle_path }}" + state: directory + owner: "root" + group: "root" + mode: 0755 + tags: + - install + - install:system-requirements +- name: download instant client + shell: "aws s3 cp {{ IT_ORACLE_S3_PATH }}/{{ IT_ORACLE_INSTANT_CLIENT }} {{ oracle_path }}" + tags: + - install + - install:system-requirements +- name: unzip instant client + unarchive: + src: "{{ oracle_path }}/{{ IT_ORACLE_INSTANT_CLIENT }}" + dest: "{{ 
oracle_path }}" + creates: "{{ oracle_path }}/instantclient_12_2" + tags: + - install + - install:system-requirements +- name: clean up zip file + file: + path: "oracle_path/{{ IT_ORACLE_INSTANT_CLIENT }}" + state: absent +- name: Install oracle specific packages + apt: + name: '{{ item }}' + state: present + update_cache: yes + with_items: '{{ it_oracle_packages }}' + tags: + - install + - install:system-requirements diff --git a/playbooks/roles/jenkins_master/defaults/main.yml b/playbooks/roles/jenkins_master/defaults/main.yml index c44f76c66a8..16839f145e7 100644 --- a/playbooks/roles/jenkins_master/defaults/main.yml +++ b/playbooks/roles/jenkins_master/defaults/main.yml @@ -6,7 +6,8 @@ jenkins_port: 8080 jenkins_nginx_port: 80 jenkins_protocol_https: true -jenkins_version: '1.651.3' +JENKINS_VERSION: '1.651.3' +jenkins_version: "{{ JENKINS_VERSION }}" jenkins_deb_url: "https://pkg.jenkins.io/debian-stable/binary/jenkins_{{ jenkins_version }}_all.deb" jenkins_deb: "jenkins_{{ jenkins_version }}_all.deb" # Jenkins jvm args are set when starting the Jenkins service, e.g., "-Xmx1024m" diff --git a/playbooks/roles/jenkins_master/tasks/main.yml b/playbooks/roles/jenkins_master/tasks/main.yml index 8606c7c0b7d..2cbb7b2c8a3 100644 --- a/playbooks/roles/jenkins_master/tasks/main.yml +++ b/playbooks/roles/jenkins_master/tasks/main.yml @@ -1,10 +1,9 @@ --- - name: Install jenkins specific system packages apt: - name: "{{ item }}" + name: "{{ jenkins_debian_pkgs }}" state: present update_cache: yes - with_items: "{{ jenkins_debian_pkgs }}" tags: - jenkins - install @@ -12,10 +11,9 @@ - name: Install jenkins extra system packages apt: - name: "{{ item }}" + name: "{{ JENKINS_EXTRA_PKGS }}" state: present update_cache: yes - with_items: "{{ JENKINS_EXTRA_PKGS }}" tags: - jenkins - install diff --git a/playbooks/roles/jenkins_worker/defaults/main.yml b/playbooks/roles/jenkins_worker/defaults/main.yml index e8f19e71434..19ac24daade 100644 --- 
a/playbooks/roles/jenkins_worker/defaults/main.yml +++ b/playbooks/roles/jenkins_worker/defaults/main.yml @@ -1,9 +1,13 @@ --- jenkins_user: "jenkins" +# the main group for the jenkins user jenkins_group: "jenkins" +# the groups that the jenkins user must belong to +jenkins_groups: "jenkins,docker" jenkins_home: /home/jenkins -jenkins_edx_platform_version: master +JENKINS_EDX_PLATFORM_VERSION: master +jenkins_edx_platform_version: "{{ JENKINS_EDX_PLATFORM_VERSION }}" # System packages jenkins_debian_pkgs: @@ -12,6 +16,25 @@ jenkins_debian_pkgs: - python-dev - libsqlite3-dev - libfreetype6-dev + - libpq-dev # packer direct download URL -packer_url: "https://releases.hashicorp.com/packer/0.8.6/packer_0.8.6_linux_amd64.zip" +packer_url: "https://releases.hashicorp.com/packer/1.4.4/packer_1.4.4_linux_amd64.zip" + +JENKINS_NODE_VERSION: "12" +jenkins_node_version: "{{ JENKINS_NODE_VERSION }}" +ansible_distribution_release: "xenial" + +jenkins_worker_python_versions: + - 2.7 + - 3.5 + - 3.6 + - 3.8 + +# The packaging for Python 3.7 and above split distutils out into a separate package; +# needed for virtualenv creation +jenkins_worker_distutils_versions: + - 3.8 + +edx_platform_python_versions: + - 3.5 diff --git a/playbooks/roles/jenkins_worker/meta/main.yml b/playbooks/roles/jenkins_worker/meta/main.yml index deb1bece323..84ede1ee097 100644 --- a/playbooks/roles/jenkins_worker/meta/main.yml +++ b/playbooks/roles/jenkins_worker/meta/main.yml @@ -1,7 +1,8 @@ --- dependencies: - common - - jscover + - role: jscover + when: platform_worker is defined - role: oraclejdk # dependencies for edx-app jenkins worker: @@ -12,31 +13,33 @@ dependencies: - role: android_sdk when: android_worker is defined # User/group to manage Android SDK - android_user: "jenkins" - android_group: "jenkins" + android_user: "android" + android_group: "android" # Tarball to download - android_download: "android-sdk_r24.4.1-linux.tgz" + # old path "android-sdk_r24.4.1-linux.tgz" replaced + 
android_download: "sdk-tools-linux-4333796.zip" + # Checksum of Android SDK (from: https://developer.android.com/studio/index.html#downloads) - android_checksum: "725bb360f0f7d04eaccff5a2d57abdd49061326d" + # Old Checksum "725bb360f0f7d04eaccff5a2d57abdd49061326d" - replaced + android_checksum: "8c7c28554a32318461802c1291d76fccfafde054" + # path to installed android sdk android_home: "/opt/android-sdk-linux" - # individual android build targets to be downloaded via the android sdk manager - android_build_targets: - - android-23 - - android-21 - # other android dependencies that cannot be tested via the android sdk manager. instead, stat the android_test_path - # to test for presence of the package + + # The SDK version used to compile the project | 6 | Android SDK Platform 28 + android_build_targets: "\"platforms;android-28\"" + + # other android dependencies that cannot be tested via the android sdk manager. instead, stat the android_test_path to test for presence of the package + # Plateform Tools | 6 | Android SDK Platform 28 + # The BuildTools | Android SDK Build-Tools 28.0.3 + # Additional components + # extras;google;m2repository | 58 | Google Repository + # extras;android;m2repository| 47.0.0 | Android Support Repository android_tools: - - { package: 'platform-tools', android_test_path: 'platform-tools' } - - { package: 'build-tools-23.0.3', android_test_path: 'build-tools/23.0.3' } - - { package: 'extra-google-m2repository', android_test_path: 'extras/google/m2repository' } - - { package: 'extra-android-m2repository', android_test_path: 'extras/android/m2repository' } - - { package: 'sys-img-armeabi-v7a-android-21', android_test_path: 'system-images/android-21/default/armeabi-v7a/' } - # - { package: 'sys-img-armeabi-v7a-android-23', android_test_path: 'system-images/android-23/default/armeabi-v7a/' } - # libraries needed for avd(android virtual device) emulation - android_apt_libraries: - - lib32stdc++6 - - lib32z1 + - { package: "\"platform-tools\"", 
android_test_path: 'platform-tools' } + - { package: "\"build-tools;28.0.3\"", android_test_path: 'build-tools/28.0.3' } + - { package: "\"extras;google;m2repository\"", android_test_path: 'extras/google/m2repository' } + - { package: "\"extras;android;m2repository\"", android_test_path: 'extras/android/m2repository' } # dependencies for loadtest driver worker - role: loadtest_driver diff --git a/playbooks/roles/jenkins_worker/tasks/main.yml b/playbooks/roles/jenkins_worker/tasks/main.yml index 732ad86d1fa..76a94f9feb4 100644 --- a/playbooks/roles/jenkins_worker/tasks/main.yml +++ b/playbooks/roles/jenkins_worker/tasks/main.yml @@ -8,6 +8,7 @@ - include: packer.yml - include: system.yml - include: python.yml +- include: node.yml # only platform workers - include: python_platform_worker.yml diff --git a/playbooks/roles/jenkins_worker/tasks/node.yml b/playbooks/roles/jenkins_worker/tasks/node.yml new file mode 100644 index 00000000000..606e513c6cf --- /dev/null +++ b/playbooks/roles/jenkins_worker/tasks/node.yml @@ -0,0 +1,16 @@ +--- +# Install nodejs + +- name: Install the gpg key for nodejs LTS + apt_key: + url: "https://deb.nodesource.com/gpgkey/nodesource.gpg.key" + state: present +- name: Install the nodejs LTS repos + apt_repository: + repo: "deb https://deb.nodesource.com/node_{{ jenkins_node_version }}.x {{ ansible_distribution_release }} main" + state: present + update_cache: yes +- name: Install the nodejs + apt: + name: nodejs + state: present diff --git a/playbooks/roles/jenkins_worker/tasks/python.yml b/playbooks/roles/jenkins_worker/tasks/python.yml index 2db861013b7..fbccdeb9fc5 100644 --- a/playbooks/roles/jenkins_worker/tasks/python.yml +++ b/playbooks/roles/jenkins_worker/tasks/python.yml @@ -1,5 +1,40 @@ --- +# Versions of Python newer than 3.5 are not available in the default +# package index for Ubuntu 16.04. 
Add the deadsnakes PPA for anything +# newer +- name: add deadsnakes PPA for newer Python versions + apt_repository: + repo: "ppa:deadsnakes/ppa" + update_cache: yes + when: ansible_distribution_release == 'xenial' + +# Install newer versions of python for testing, but do not set them +# as the default version +- name: Install python versions + apt: + name: 'python{{ item }}' + state: present + update_cache: yes + with_items: '{{ jenkins_worker_python_versions }}' + +# Install 'dev' packages for each version of python that is installed +- name: Install python dev packages + apt: + name: 'python{{ item }}-dev' + state: present + update_cache: yes + with_items: '{{ jenkins_worker_python_versions }}' + +# Install 'distutils' packages for each installed version of python which has one +- name: Install python distutils packages + apt: + name: 'python{{ item }}-distutils' + state: present + update_cache: yes + with_items: '{{ jenkins_worker_distutils_versions }}' # Requests library is required for the github status script. 
- name: Install requests Python library - pip: name=requests state=present + pip: + name: requests + state: present diff --git a/playbooks/roles/jenkins_worker/tasks/python_platform_worker.yml b/playbooks/roles/jenkins_worker/tasks/python_platform_worker.yml index 6860afe234d..8af286a5760 100644 --- a/playbooks/roles/jenkins_worker/tasks/python_platform_worker.yml +++ b/playbooks/roles/jenkins_worker/tasks/python_platform_worker.yml @@ -13,25 +13,64 @@ depth: 1 become_user: "{{ jenkins_user }}" +# In order to create multiple virtualenvs with the same name, +# put them into separate directories +- name: Create directories for virtualenvs to avoid naming collisions + file: + path: "{{ jenkins_home }}/edx-venv-{{ item }}" + state: directory + with_items: "{{ jenkins_worker_python_versions }}" + become_user: "{{ jenkins_user }}" + +# Combine testing and django requirements files for single virtualenv invocation +- name: Combine requirements files + shell: "cat {{ jenkins_home }}/shallow-clone/requirements/edx/testing.txt {{ jenkins_home }}/shallow-clone/requirements/edx/django.txt > {{ jenkins_home }}/shallow-clone/requirements/edx/jenkins.txt" + become_user: "{{ jenkins_user }}" + # Install the platform requirements using pip. - name: Install edx-platform requirements using pip pip: chdir: "{{ jenkins_home }}/shallow-clone" - requirements: "{{ jenkins_home }}/shallow-clone/requirements/edx/{{ item }}" + requirements: "{{ jenkins_home }}/shallow-clone/requirements/edx/jenkins.txt" extra_args: "--exists-action=w" - virtualenv: "{{ jenkins_home }}/edx-venv" + virtualenv: "{{ jenkins_home }}/edx-venv-{{ item }}/edx-venv" virtualenv_command: virtualenv - with_items: - - django.txt - - testing.txt + virtualenv_python: "python{{ item }}" + with_items: "{{ edx_platform_python_versions }}" become_user: "{{ jenkins_user }}" -# Archive the current state of the virtualenv +# Archive the current state of each of the virtualenvs # as a starting point for new builds. 
-# The edx-venv directory is deleted and then recreated +# The edx-venv-x directory is deleted and then recreated # cleanly from the archive by the jenkins build scripts. - name: Create a clean virtualenv archive - command: "tar -cpzf edx-venv_clean.tar.gz edx-venv" + command: "tar -C edx-venv-{{ item }} -cpzf edx-venv_clean-{{ item }}.tar.gz edx-venv" + args: + chdir: "{{ jenkins_home }}" + with_items: "{{ edx_platform_python_versions }}" + become_user: "{{ jenkins_user }}" + +- name: Add script to set install node packages + template: + src: jenkins_nodeenv.j2 + dest: "{{ jenkins_home }}/jenkins_nodeenv" + owner: "{{ jenkins_user }}" + group: "{{ jenkins_group }}" + mode: "0500" + when: platform_worker is defined + +# Install node packages for platform. +- name: Install node packages for caching + command: "bash {{ jenkins_home }}/jenkins_nodeenv" + become_user: "{{ jenkins_user }}" + +# Archive the current state of the npm cache as a starting +# point for new builds. Most builds don't introduce package +# changes, so npm won't need to fetch anything from a remote +# registry. 
This should reduce the frequency of hanging +npm installs (see https://openedx.atlassian.net/browse/TE-2732) +- name: Create a clean npm cache archive + command: "tar -cpzf edx-npm-cache_clean.tar.gz .npm" args: chdir: "{{ jenkins_home }}" become_user: "{{ jenkins_user }}" diff --git a/playbooks/roles/jenkins_worker/tasks/system.yml b/playbooks/roles/jenkins_worker/tasks/system.yml index 34de9581a9b..7a1a375a7a9 100644 --- a/playbooks/roles/jenkins_worker/tasks/system.yml +++ b/playbooks/roles/jenkins_worker/tasks/system.yml @@ -4,7 +4,11 @@ # The Jenkins account needs a login shell because Jenkins uses scp - name: Add the jenkins user to the group and configure shell - user: name={{ jenkins_user }} append=yes group={{ jenkins_group }} shell=/bin/bash + user: + name: '{{ jenkins_user }}' + groups: '{{ jenkins_groups }}' + append: yes + shell: /bin/bash # Because of a bug in the latest release of the EC2 plugin # we need to use a key generated by Amazon (not imported) diff --git a/playbooks/roles/jenkins_worker/tasks/test_android_worker.yml b/playbooks/roles/jenkins_worker/tasks/test_android_worker.yml index 6d3a2e1c5d9..db378bfaf60 100644 --- a/playbooks/roles/jenkins_worker/tasks/test_android_worker.yml +++ b/playbooks/roles/jenkins_worker/tasks/test_android_worker.yml @@ -1,47 +1,88 @@ --- -# Test that the Android sdk has been installed to the correct location -- name: Verify Android SDK is installed +# Verify Android Platform +- name: Verify that Android Platform is installed + shell: "stat {{ android_home }}/platforms/android-28" + register: android_platform +- assert: + that: + - "android_platform.rc == 0" + +# Verify Android Platform Tools +- name: Verify that Android Platform Tools are installed + shell: "stat {{ android_home }}/platform-tools" + register: android_platform_tools +- assert: + that: + - "android_platform_tools.rc == 0" + +# Verify Android Build Tools +- name: Verify that Android Build Tools are installed + shell: "stat {{ android_home
}}/build-tools" + register: android_build_tools +- assert: + that: + - "android_build_tools.rc == 0" + +# Verify Android Google Repository +- name: Verify that Android Google Repository is installed + shell: "stat {{ android_home }}/extras/google/m2repository" + register: android_google_repo +- assert: + that: + - "android_google_repo.rc == 0" + +# Verify Android Repository +- name: Verify that Android Repository is installed + shell: "stat {{ android_home }}/extras/android/m2repository" + register: android_repo +- assert: + that: + - "android_repo.rc == 0" + +# Verify system image +- name: Verify that Android sys image is installed + shell: "stat {{ android_home }}/system-images/android-28" + register: android_27_image +- assert: + that: + - "android_27_image.rc == 0" + +# Verify Android tool +- name: Verify Android tool is installed shell: "stat {{ android_home }}/tools/android" register: android_version - assert: that: - "android_version.rc == 0" -# Test that the necessary build targets for building the edx app have -# been installed. -- name: Verify correct Android build targets installed - shell: "{{ android_home }}/tools/android list target |tr '\n' ' '" - register: build_targets -- with_items: "{{ android_build_targets }}" - assert: - that: - - "'{{ item }}' in build_targets.stdout" -# Put the paths of downloaded packages into a list, so they can be iterated over -# in an assert block -- name: Gather paths of installed Android packages into a list - util_map: - function: 'zip_to_list' - input: "{{ android_tools }}" - args: - - "android_test_path" - register: android_test_paths -# Test that the correct android tools are installed -- name: Verify build tools installed - stat: path={{ android_home }}/{{ item }} - register: android_package_stats - # When this task is skipped, ansible still tries to resolve the variable - # returned from zip_to_list, but cannot. 
Add a default value to avoid these - # failures when other tasks are run, but not mask true failures when this - # task is run. - with_items: "{{ android_test_paths.function_output | default(['non-existent-package']) }}" -- with_items: "{{ android_package_stats.results }}" - assert: - that: - - "item.stat.exists == True" -# TEMP: until we either change to using a different system image OR google hosts -# the android 23 image again -- name: Verify that cached Android sys image is installed - shell: "stat {{ android_home }}/system-images/android-23" - register: android_23_stat -- assert: - that: - - "android_23_stat.rc == 0" + +# Verify Emulator tool +- name: Verify Emulator tool is installed + shell: "stat {{ android_home }}/tools/emulator" + register: emulator_version +- assert: + that: + - "emulator_version.rc == 0" + +# Verify AVD Manager tool +- name: Verify AVD Manager tool is installed + shell: "stat {{ android_home }}/tools/bin/avdmanager" + register: avdmanager_version +- assert: + that: + - "avdmanager_version.rc == 0" + +# Verify SDK Manager tool +- name: Verify SDK Manager tool is installed + shell: "stat {{ android_home }}/tools/bin/sdkmanager" + register: sdkmanager_version +- assert: + that: + - "sdkmanager_version.rc == 0" + +# Verify adb tool +- name: Verify adb tool is installed + shell: "stat {{ android_home }}/platform-tools/adb" + register: adb_version +- assert: + that: + - "adb_version.rc == 0" \ No newline at end of file diff --git a/playbooks/roles/jenkins_worker/tasks/test_codejail_worker.yml b/playbooks/roles/jenkins_worker/tasks/test_codejail_worker.yml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/playbooks/roles/jenkins_worker/tasks/test_platform_worker.yml b/playbooks/roles/jenkins_worker/tasks/test_platform_worker.yml index 99dc8d8e9cd..c72f71d9f7a 100644 --- a/playbooks/roles/jenkins_worker/tasks/test_platform_worker.yml +++ b/playbooks/roles/jenkins_worker/tasks/test_platform_worker.yml @@ -22,11 +22,11 @@ 
register: firefox_version - assert: that: - - "'59' in firefox_version.stdout" + - "'61' in firefox_version.stdout" # Verify the virtualenv tar is newly-built - name: Get info on virtualenv tar - stat: path={{ jenkins_home }}/edx-venv_clean.tar.gz + stat: path={{ jenkins_home }}/edx-venv_clean-3.5.tar.gz register: edxvenv - assert: that: diff --git a/playbooks/roles/jenkins_worker/templates/jenkins_nodeenv.j2 b/playbooks/roles/jenkins_worker/templates/jenkins_nodeenv.j2 index 9609280ebe9..ceaa116a96d 100644 --- a/playbooks/roles/jenkins_worker/templates/jenkins_nodeenv.j2 +++ b/playbooks/roles/jenkins_worker/templates/jenkins_nodeenv.j2 @@ -5,5 +5,4 @@ # installs will be cached and used as a baseline for installations # in future test runs. cd {{ jenkins_home }}/shallow-clone -source scripts/jenkins-common.sh npm install diff --git a/playbooks/roles/journals/defaults/main.yml b/playbooks/roles/journals/defaults/main.yml deleted file mode 100644 index 847b9c1e99f..00000000000 --- a/playbooks/roles/journals/defaults/main.yml +++ /dev/null @@ -1,201 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS -# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -## -# Defaults for role journals -# - -JOURNALS_GIT_IDENTITY: !!null - -# -# vars are namespace with the module name. 
-# -journals_service_name: "{{ 'journals' if JOURNALS_ENABLED else 'None' }}" -journals_gunicorn_port: 8606 - -journals_environment: - JOURNALS_CFG: "{{ COMMON_CFG_DIR }}/{{ journals_service_name }}.yml" - -journals_user: "{{ journals_service_name if JOURNALS_ENABLED else 'None' }}" -journals_home: "{{ COMMON_APP_DIR }}/{{ journals_service_name }}" -journals_code_dir_path: "{{ journals_home }}/{{ journals_service_name }}" -journals_code_dir: "{{ journals_code_dir_path if JOURNALS_ENABLED else 'None' }}" - -# -# OS packages -# - -journals_debian_pkgs: - - libxml2-dev - - libxslt-dev - - libjpeg-dev - - -JOURNALS_NGINX_PORT: "1{{ journals_gunicorn_port }}" -JOURNALS_SSL_NGINX_PORT: "4{{ journals_gunicorn_port }}" - -JOURNALS_DEFAULT_DB_NAME: "{{ 'journals' if JOURNALS_ENABLED else '' }}" -JOURNALS_MYSQL: 'localhost' -# MySQL usernames are limited to 16 characters -JOURNALS_MYSQL_USER: 'journ001' -JOURNALS_MYSQL_PASSWORD: 'password' - -# use port 9500 as elasticsearch5 will be run in docker container on this port -JOURNALS_ELASTICSEARCH_URL: 'http://127.0.0.1:9500' -JOURNALS_ELASTICSEARCH_INDEX_NAME: 'journals' - -JOURNALS_MEMCACHE: [ 'memcache' ] - -JOURNALS_VERSION: "master" -JOURNALS_DJANGO_SETTINGS_MODULE: "journals.settings.production" -JOURNALS_URL_ROOT: 'http://localhost:{{ JOURNALS_NGINX_PORT }}' -JOURNALS_LOGOUT_URL: '{{ JOURNALS_URL_ROOT }}/logout/' - -JOURNALS_SECRET_KEY: 'Your secret key here' - -JOURNALS_LANGUAGE_CODE: 'en' - -## Configuration for django-parler package. 
For more information visit -## https://django-parler.readthedocs.io/en/latest/configuration.html#parler-languages -JOURNALS_PARLER_DEFAULT_LANGUAGE_CODE: '{{JOURNALS_LANGUAGE_CODE}}' -JOURNALS_PARLER_LANGUAGES : - 1: - - code: 'en' - default: - fallbacks: - - '{{JOURNALS_PARLER_DEFAULT_LANGUAGE_CODE}}' - hide_untranslated: 'False' - -JOURNALS_DEFAULT_PARTNER_ID: 1 -JOURNALS_SESSION_EXPIRE_AT_BROWSER_CLOSE: false - -# Used to automatically configure OAuth2 Client -JOURNALS_SOCIAL_AUTH_EDX_OIDC_KEY : 'journals-key' -JOURNALS_SOCIAL_AUTH_EDX_OIDC_SECRET : 'journals-secret' -JOURNALS_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false - -JOURNALS_PLATFORM_NAME: 'Your Platform Name Here' - -JOURNALS_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ journals_service_name }}' -JOURNALS_MEDIA_ROOT: '{{ JOURNALS_DATA_DIR }}/media' -JOURNALS_MEDIA_URL: '/media/' - -JOURNALS_MEDIA_STORAGE_BACKEND: - DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' - MEDIA_ROOT: '{{ JOURNALS_MEDIA_ROOT }}' - MEDIA_URL: '{{ JOURNALS_MEDIA_URL }}' - -JOURNALS_STATICFILES_STORAGE: 'django.contrib.staticfiles.storage.StaticFilesStorage' - -# You can set different email backends with django: -# https://docs.djangoproject.com/en/1.9/topics/email/#email-backends -JOURNALS_EMAIL_BACKEND: 'django_ses.SESBackend' - -# For email backend django-ses, the following settings are required -JOURNALS_AWS_SES_REGION_NAME: 'us-east-1' -JOURNALS_AWS_SES_REGION_ENDPOINT: 'email.us-east-1.amazonaws.com' - -# For default email backend SMTP, following settings are required -JOURNALS_EMAIL_HOST: 'localhost' -JOURNALS_EMAIL_PORT: 25 -JOURNALS_EMAIL_USE_TLS: False -JOURNALS_EMAIL_HOST_USER: '' -JOURNALS_EMAIL_HOST_PASSWORD: '' - -JOURNALS_PUBLISHER_FROM_EMAIL: !!null - -JOURNALS_OPENEXCHANGERATES_API_KEY: '' - -JOURNALS_GUNICORN_EXTRA: '' - -JOURNALS_EXTRA_APPS: [] - -JOURNALS_REPOS: - - PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}" - DOMAIN: "{{ COMMON_GIT_MIRROR }}" - PATH: "{{ COMMON_GIT_PATH }}" - REPO: 'journals.git' - VERSION: "{{ 
JOURNALS_VERSION }}" - DESTINATION: "{{ journals_code_dir }}" - SSH_KEY: "{{ JOURNALS_GIT_IDENTITY }}" - -journals_service_config_overrides: - ELASTICSEARCH_URL: '{{ JOURNALS_ELASTICSEARCH_URL }}' - ELASTICSEARCH_INDEX_NAME: '{{ JOURNALS_ELASTICSEARCH_INDEX_NAME }}' - - PLATFORM_NAME: '{{ JOURNALS_PLATFORM_NAME }}' - - DEFAULT_PARTNER_ID: '{{ JOURNALS_DEFAULT_PARTNER_ID }}' - - EMAIL_BACKEND: '{{ JOURNALS_EMAIL_BACKEND }}' - - # Settings for django-ses email backend - AWS_SES_REGION_NAME: '{{ JOURNALS_AWS_SES_REGION_NAME }}' - AWS_SES_REGION_ENDPOINT: '{{ JOURNALS_AWS_SES_REGION_ENDPOINT }}' - - # Settings for default django SMTP email backend - EMAIL_HOST: '{{ JOURNALS_EMAIL_HOST }}' - EMAIL_PORT: '{{ JOURNALS_EMAIL_PORT }}' - EMAIL_USE_TLS: '{{ JOURNALS_EMAIL_USE_TLS }}' - EMAIL_HOST_USER: '{{ JOURNALS_EMAIL_HOST_USER }}' - EMAIL_HOST_PASSWORD: '{{ JOURNALS_EMAIL_HOST_PASSWORD }}' - - PUBLISHER_FROM_EMAIL: '{{ JOURNALS_PUBLISHER_FROM_EMAIL }}' - - OPENEXCHANGERATES_API_KEY: '{{ JOURNALS_OPENEXCHANGERATES_API_KEY }}' - - LANGUAGE_CODE: '{{JOURNALS_LANGUAGE_CODE}}' - PARLER_DEFAULT_LANGUAGE_CODE: '{{JOURNALS_PARLER_DEFAULT_LANGUAGE_CODE}}' - PARLER_LANGUAGES : '{{JOURNALS_PARLER_LANGUAGES}}' - CSRF_COOKIE_SECURE: "{{ JOURNALS_CSRF_COOKIE_SECURE }}" - -# See edx_django_service_automated_users for an example of what this should be -JOURNALS_AUTOMATED_USERS: {} - -JOURNALS_CSRF_COOKIE_SECURE: false - -JOURNALS_DISCOVERY_SERVICE_URL: "https://discovery-{{ EDXAPP_LMS_BASE }}" -JOURNALS_LMS_URL_ROOT: "https://{{ EDXAPP_LMS_BASE }}" -JOURNALS_DISCOVERY_API_URL: "{{ JOURNALS_DISCOVERY_SERVICE_URL }}/api/v1/" -JOURNALS_DISCOVERY_JOURNALS_API_URL: "{{ JOURNALS_DISCOVERY_SERVICE_URL }}/journal/api/v1/" -JOURNALS_ECOMMERCE_BASE_URL: "{{ ECOMMERCE_ECOMMERCE_URL_ROOT }}" -JOURNALS_ECOMMERCE_API_URL: "{{ JOURNALS_ECOMMERCE_BASE_URL }}/api/v2/" -JOURNALS_ECOMMERCE_JOURNALS_API_URL: "{{ JOURNALS_ECOMMERCE_BASE_URL }}/journal/api/v1" -journals_create_demo_data: false - 
-journals_post_migrate_commands: - - command: > - ./manage.py create_site - --sitename "DemoSite" - --hostname "journals-{{ EDXAPP_SITE_NAME }}" - --port "80" - --lms-url-root "{{ JOURNALS_LMS_URL_ROOT }}" - --lms-public-url-root-override "{{ JOURNALS_LMS_URL_ROOT }}" - --discovery-api-url "{{ JOURNALS_DISCOVERY_API_URL }}" - --ecommerce-api-url "{{ JOURNALS_ECOMMERCE_API_URL }}" - --discovery-partner-id "edX" - --ecommerce-partner-id "edX" - --currency-codes USD - --client-secret "{{ JOURNALS_SOCIAL_AUTH_EDX_OIDC_SECRET }}" - --client-id "{{ JOURNALS_SOCIAL_AUTH_EDX_OIDC_KEY }}" - --discovery-journal-api-url "{{ JOURNALS_DISCOVERY_JOURNALS_API_URL }}" - --ecommerce-journal-api-url "{{ JOURNALS_ECOMMERCE_JOURNALS_API_URL }}" - --ecommerce-public-url-root "{{ JOURNALS_ECOMMERCE_BASE_URL }}" - when: "{{ journals_create_demo_data }}" - - command: './manage.py create_org --key "edX" --sitename "DemoSite"' - when: "{{ journals_create_demo_data }}" - - command: > - {{ COMMON_BIN_DIR }}/python.journals {{ COMMON_BIN_DIR }}/manage.journals publish_journals --create "Demo Journal" --org "edX" --price "100.00" - become_user: "{{ journals_user }}" - environment: "{{ journals_environment }}" - # when: "{{ JOURNALS_ENABLED }}" - # re-enable once we create automatically create an Organization in Discovery - when: false - - diff --git a/playbooks/roles/journals/meta/main.yml b/playbooks/roles/journals/meta/main.yml deleted file mode 100644 index 1f94f11f5b5..00000000000 --- a/playbooks/roles/journals/meta/main.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS -# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -## -# Role includes for role journals -# -# Example: -# -# dependencies: -# - { -# role: my_role -# my_role_var0: 'foo' -# 
my_role_var1: 'bar' -# } -dependencies: - - role: docker - - role: docker-tools - - role: edx_django_service - edx_django_service_version: '{{ JOURNALS_VERSION }}' - edx_django_service_repos: '{{ JOURNALS_REPOS }}' - edx_django_service_name: '{{ journals_service_name }}' - edx_django_service_user: '{{ journals_user }}' - edx_django_service_home: '{{ COMMON_APP_DIR }}/{{ journals_service_name }}' - edx_django_service_config_overrides: '{{ journals_service_config_overrides }}' - edx_django_service_debian_pkgs_extra: '{{ journals_debian_pkgs }}' - edx_django_service_gunicorn_port: '{{ journals_gunicorn_port }}' - edx_django_service_django_settings_module: '{{ JOURNALS_DJANGO_SETTINGS_MODULE }}' - edx_django_service_environment_extra: '{{ journals_environment }}' - edx_django_service_gunicorn_extra: '{{ JOURNALS_GUNICORN_EXTRA }}' - edx_django_service_wsgi_name: 'journals' - edx_django_service_nginx_port: '{{ JOURNALS_NGINX_PORT }}' - edx_django_service_ssl_nginx_port: '{{ JOURNALS_SSL_NGINX_PORT }}' - edx_django_service_language_code: '{{ JOURNALS_LANGUAGE_CODE }}' - edx_django_service_secret_key: '{{ JOURNALS_SECRET_KEY }}' - edx_django_service_staticfiles_storage: '{{ JOURNALS_STATICFILES_STORAGE }}' - edx_django_service_media_storage_backend: '{{ JOURNALS_MEDIA_STORAGE_BACKEND }}' - edx_django_service_memcache: '{{ JOURNALS_MEMCACHE }}' - edx_django_service_default_db_host: '{{ JOURNALS_MYSQL }}' - edx_django_service_default_db_name: '{{ JOURNALS_DEFAULT_DB_NAME }}' - edx_django_service_default_db_atomic_requests: false - edx_django_service_db_user: '{{ JOURNALS_MYSQL_USER }}' - edx_django_service_db_password: '{{ JOURNALS_MYSQL_PASSWORD }}' - edx_django_service_social_auth_edx_oidc_key: '{{ JOURNALS_SOCIAL_AUTH_EDX_OIDC_KEY }}' - edx_django_service_social_auth_edx_oidc_secret: '{{ JOURNALS_SOCIAL_AUTH_EDX_OIDC_SECRET }}' - edx_django_service_social_auth_redirect_is_https: '{{ JOURNALS_SOCIAL_AUTH_REDIRECT_IS_HTTPS }}' - edx_django_service_extra_apps: '{{ 
JOURNALS_EXTRA_APPS }}' - edx_django_service_session_expire_at_browser_close: '{{ JOURNALS_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' - edx_django_service_automated_users: '{{ JOURNALS_AUTOMATED_USERS }}' - edx_django_service_use_python3: true - edx_django_service_post_migrate_commands: '{{ journals_post_migrate_commands }}' - edx_django_service_has_static_assets: true diff --git a/playbooks/roles/journals/tasks/Dockerfile b/playbooks/roles/journals/tasks/Dockerfile deleted file mode 100644 index 41c201fc54b..00000000000 --- a/playbooks/roles/journals/tasks/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM elasticsearch:5.6.9 -RUN bin/elasticsearch-plugin install ingest-attachment diff --git a/playbooks/roles/journals/tasks/main.yml b/playbooks/roles/journals/tasks/main.yml deleted file mode 100644 index 0bcd685d72e..00000000000 --- a/playbooks/roles/journals/tasks/main.yml +++ /dev/null @@ -1,82 +0,0 @@ ---- -# -# edX Configuration -# -# github: https://github.com/edx/configuration -# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS -# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions -# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT -# -# -# -# Tasks for role discovery -# -# Overview: This role's tasks come from edx_django_service. 
-# -# -# Dependencies: -# -# -# Example play: -# -# -- name: Create LMS catalog integration - shell: > - {{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms --settings={{ COMMON_EDXAPP_SETTINGS }} - create_catalog_integrations --enabled --internal_api_url {{ EDXAPP_DISCOVERY_API_URL }} - --service_username {{ DISCOVERY_SERVICE_USER_NAME }} - become_user: "{{ edxapp_user }}" - environment: "{{ edxapp_environment }}" - tags: - - manage - - manage:db - -- name: create web-writable journal data dirs - file: - path: "{{ item }}" - state: directory - owner: "{{ common_web_user }}" - group: "{{ journals_user }}" - mode: "0775" - with_items: - - "{{ JOURNALS_MEDIA_ROOT }}" - tags: - - install - - install:base - -- name: build elasticsearch5 image - docker_image: - path: /edx/app/edx_ansible/edx_ansible/playbooks/roles/journals/tasks/ - name: elasticsearch5 - state: present - tags: - - install - - install:base - -- name: create elasticsearch5 docker container - docker_container: - name: elasticsearch5 - image: elasticsearch5 - state: started - ports: - - "9500:9200" - tags: - - install - - install:base - -- name: wait for elasticsearch container to start - wait_for: - port: 9500 - delay: 10 - tags: - - install - - install:base - -- name: update journal elasticsearch index - shell: > - {{ COMMON_BIN_DIR }}/python.journals {{ COMMON_BIN_DIR }}/manage.journals update_index --settings={{ JOURNALS_DJANGO_SETTINGS_MODULE }} - become_user: "{{ journals_user }}" - environment: "{{ journals_environment }}" - tags: - - install - - install:app-requirements diff --git a/playbooks/roles/jscover/defaults/main.yml b/playbooks/roles/jscover/defaults/main.yml index 181953dc743..ef424b90569 100644 --- a/playbooks/roles/jscover/defaults/main.yml +++ b/playbooks/roles/jscover/defaults/main.yml @@ -6,5 +6,6 @@ jscover_role_name: jscover # JSCover direct download URL -jscover_version: "1.0.2" +JSCOVER_VERSION: "1.0.2" +jscover_version: "{{ JSCOVER_VERSION }}" jscover_url: 
"http://files.edx.org/testeng/JSCover-{{ jscover_version }}.zip" diff --git a/playbooks/roles/jwt_signature/defaults/main.yml b/playbooks/roles/jwt_signature/defaults/main.yml new file mode 100644 index 00000000000..74dec19ac6f --- /dev/null +++ b/playbooks/roles/jwt_signature/defaults/main.yml @@ -0,0 +1,32 @@ +# Default variables for the jwt_signature role, automatically loaded +# when the role is included. Can be overridden at time of inclusion. +--- + +# Name of the file to store generated JWT signature settings into. +# This file will have the form: +# JWT_AUTH: +# JWT_PRIVATE_SIGNING_JWK: ... +# JWT_PUBLIC_SIGNING_JWK_SET: ... +# JWT_SIGNING_ALGORITHM: .. +jwt_signature_file: /tmp/lms_jwt_signature.yml + +# these variables are needed to execute the generate_jwt_signing_key management command. +edxapp_env_path: /edx/app/edxapp/edxapp_env +edxapp_venv_dir: /edx/app/edxapp/venvs/edxapp +edxapp_code_dir: /edx/app/edxapp/edx-platform + +# the application config file that we'll inject JWT_AUTH settings into +app_config_file: /edx/etc/lms.yml + +# template file used to re-render app config +app_config_template: roles/edxapp/templates/lms.yml.j2 + +# which user and group owns the app config file, with what perms +app_config_owner: edxapp +app_config_group: www-data +app_config_mode: 0640 + +# The only play that sets this to true is edxapp - +# whenever the edxapp play is executed, the management +# command that generates an updated public JWK set is run. +CAN_GENERATE_NEW_JWT_SIGNATURE: False diff --git a/playbooks/roles/jwt_signature/tasks/main.yml b/playbooks/roles/jwt_signature/tasks/main.yml new file mode 100644 index 00000000000..7a834678061 --- /dev/null +++ b/playbooks/roles/jwt_signature/tasks/main.yml @@ -0,0 +1,61 @@ +# Generate JWT signature settings (probably if you're on sandbox) + +- name: create JWT signature settings + shell: . 
{{ edxapp_env_path }} && {{ edxapp_venv_dir }}/bin/python manage.py lms generate_jwt_signing_key --output-file {{ jwt_signature_file }} --strip-key-prefix + args: + chdir: "{{ edxapp_code_dir }}" + when: CAN_GENERATE_NEW_JWT_SIGNATURE + +- name: fetch JWT signature settings from host + fetch: + src: "{{ jwt_signature_file }}" + # this will save into /tmp/{{ inventory_hostname }}/{{ jwt_signature_file }} on host + dest: /tmp + +- name: read JWT signature settings + include_vars: + file: "/tmp/{{ inventory_hostname }}/{{ jwt_signature_file }}" + name: lms_jwt_signature + +- name: fetch app config from host + fetch: + src: "{{ app_config_file }}" + # this will save into /tmp/{{ inventory_hostname }}/{{ app_config_file }} on host + dest: /tmp + +- name: read app config into variable + include_vars: + file: "/tmp/{{ inventory_hostname }}/{{ app_config_file }}" + name: app_config_vars + +- name: combine app config with jwt_signature config + set_fact: + app_combined_config: '{{ app_config_vars | combine(lms_jwt_signature, recursive=True) }}' + +- name: render app config with jwt signature to yaml file + template: + src: roles/jwt_signature/templates/app_config.yml.j2 + dest: "{{ app_config_file }}" + owner: "{{ app_config_owner }}" + group: "{{ app_config_group }}" + mode: "{{ app_config_mode }}" + +- name: delete JWT signature file on host + file: + path: "/tmp/{{ inventory_hostname }}/{{ jwt_signature_file }}" + state: absent + +- name: delete app config file on host + file: + path: "/tmp/{{ inventory_hostname }}/{{ app_config_file }}" + state: absent + +# The app must be restarted so that the config file variables +# are loaded into the Django settings. 
+- name: restart the application to load JWT signature settings + supervisorctl: + name: "{{ app_name }}" + supervisorctl_path: "{{ supervisor_ctl }}" + config: "{{ supervisor_cfg }}" + state: restarted + become_user: "{{ supervisor_service_user }}" diff --git a/playbooks/roles/jwt_signature/templates/app_config.yml.j2 b/playbooks/roles/jwt_signature/templates/app_config.yml.j2 new file mode 100644 index 00000000000..7dc3a5c3d8d --- /dev/null +++ b/playbooks/roles/jwt_signature/templates/app_config.yml.j2 @@ -0,0 +1,3 @@ +{% if app_combined_config %} +{{ app_combined_config | to_nice_yaml }} +{% endif %} diff --git a/playbooks/roles/launch_ec2/tasks/main.yml b/playbooks/roles/launch_ec2/tasks/main.yml index 21cb343aaa1..c9f096429fe 100644 --- a/playbooks/roles/launch_ec2/tasks/main.yml +++ b/playbooks/roles/launch_ec2/tasks/main.yml @@ -68,7 +68,7 @@ record: "{{ dns_name }}.{{ dns_zone }}" value: "{{ item.public_dns_name }}" register: task_result - until: task_result|succeeded + until: task_result is succeeded retries: 5 delay: 30 with_items: "{{ ec2.instances }}" @@ -84,12 +84,12 @@ record: "{{ item[1] }}-{{ dns_name }}.{{ dns_zone }}" value: "{{ item[0].public_dns_name }}" register: task_result - until: task_result|succeeded + until: task_result is succeeded retries: 5 delay: 30 with_nested: - "{{ ec2.instances }}" - - ['studio', 'ecommerce', 'preview', 'discovery', 'journals', 'credentials', 'veda', 'analytics-api'] + - ['studio', 'ecommerce', 'preview', 'discovery', 'credentials', 'veda', 'analytics-api', 'registrar', 'program-manager', 'learner-portal'] - name: Add DNS name for whitelabel sites local_action: @@ -102,7 +102,7 @@ record: "{{ item[1] }}" value: "{{ item[0].public_dns_name }}" register: task_result - until: task_result|succeeded + until: task_result is succeeded retries: 5 delay: 30 with_nested: diff --git a/playbooks/roles/learner_portal/defaults/main.yml b/playbooks/roles/learner_portal/defaults/main.yml new file mode 100644 index 
00000000000..7f472d92002 --- /dev/null +++ b/playbooks/roles/learner_portal/defaults/main.yml @@ -0,0 +1,44 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# +learner_portal_home: '{{ COMMON_APP_DIR }}/{{ learner_portal_service_name }}' +NVM_DIR: '{{ learner_portal_home }}' +learner_portal_user: 'root' +learner_portal_git_identity: 'none' +edx_django_service_use_python3: false +learner_portal_repo: 'https://github.com/edx/frontend-app-learner-portal.git' +LEARNER_PORTAL_VERSION: 'master' +learner_portal_version: "{{ LEARNER_PORTAL_VERSION }}" +learner_portal_service_name: 'learner_portal' +LEARNER_PORTAL_NODE_VERSION: '12.11.1' +learner_portal_node_version: "{{ LEARNER_PORTAL_NODE_VERSION }}" + +learner_portal_nodeenv_dir: '{{ learner_portal_home }}/nodeenvs/{{ learner_portal_service_name }}' +learner_portal_nodeenv_bin: '{{learner_portal_nodeenv_dir}}/bin' +learner_portal_app_dir: "{{ COMMON_APP_DIR }}/learner_portal" +learner_portal_code_dir: "{{ learner_portal_app_dir }}/learner_portal" +learner_portal_dist_dir: "{{ learner_portal_code_dir }}/dist" +learner_portal_env_vars: + PATH: "{{ learner_portal_nodeenv_bin }}:{{ ansible_env.PATH }}" + NODE_ENV: "production" + ACTIVE_ENV: "production" + BASE_URL: 'https://learner-portal-{{ COMMON_LMS_BASE_URL }}' + LMS_BASE_URL: '{{ COMMON_LMS_BASE_URL }}' + LOGIN_URL: '{{ COMMON_LMS_BASE_URL }}/login' + LOGOUT_URL: '{{ COMMON_LMS_BASE_URL }}/logout' + CSRF_TOKEN_API_PATH: '/csrf/api/v1/token' + REFRESH_ACCESS_TOKEN_ENDPOINT: '{{ COMMON_LMS_BASE_URL }}/login_refresh' + ACCESS_TOKEN_COOKIE_NAME: 'edx-jwt-cookie-header-payload' + USER_INFO_COOKIE_NAME: 'edx-user-info' + DESIGNER_BASE_URL: '' + HOST_NAME: '' + SEGMENT_KEY: '' + MOCK_DATA: true diff --git 
a/playbooks/roles/learner_portal/meta/main.yml b/playbooks/roles/learner_portal/meta/main.yml new file mode 100644 index 00000000000..3d12d718ea7 --- /dev/null +++ b/playbooks/roles/learner_portal/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - nginx diff --git a/playbooks/roles/learner_portal/tasks/main.yml b/playbooks/roles/learner_portal/tasks/main.yml new file mode 100644 index 00000000000..a82695f92fa --- /dev/null +++ b/playbooks/roles/learner_portal/tasks/main.yml @@ -0,0 +1,88 @@ +- name: Remove old git repo + file: + state: absent + path: "{{ learner_portal_code_dir }}/" + +- name: Remove old app repo + file: + state: absent + path: "{{ learner_portal_app_dir }}" + +- name: Create learner_portal app folder + file: + path: "{{ learner_portal_app_dir }}" + state: directory + owner: "{{ learner_portal_user }}" + group: "{{ learner_portal_user }}" + +- name: Checkout learner_portal repo into {{ learner_portal_code_dir }} + git: + dest: "{{ learner_portal_code_dir }}" + repo: "{{ learner_portal_repo }}" + version: "{{ learner_portal_version }}" + accept_hostkey: yes + become_user: "{{ learner_portal_user }}" + register: learner_portal_checkout + +- name: Install nodeenv + apt: + name: nodeenv + become_user: "{{ learner_portal_user }}" + environment: "{{ learner_portal_env_vars }}" + tags: + - install + - install:system-requirements + +# Install node +- name: Create nodeenv + shell: "nodeenv {{ learner_portal_nodeenv_dir }} --node={{ learner_portal_node_version }} --prebuilt --force" + become_user: "{{ learner_portal_user }}" + environment: "{{ learner_portal_env_vars }}" + tags: + - install + - install:system-requirements + +# Set the npm registry +# This needs to be done as root since npm is weird about +# chown - https://github.com/npm/npm/issues/3565 +- name: Set the npm registry + shell: "{{ learner_portal_nodeenv_bin }}/npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'" + args: + creates: "{{ learner_portal_code_dir }}/.npmrc" + 
environment: "{{ learner_portal_env_vars }}" + become_user: "{{ learner_portal_user }}" + tags: + - install + - install:app-requirements + +# we need to do this so that npm can find a node install to use to build node-sass +- name: prepend node path + shell: "{{ learner_portal_nodeenv_bin }}/npm config set scripts-prepend-node-path true" + environment: "{{ learner_portal_env_vars }}" + become_user: "{{ learner_portal_user }}" + tags: + - install + - install:app-requirements + +# install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json +# The version of ansible we are using also does not make use of "--unsafe-perm", which we need for node-sass +- name: Install node dependencies + shell: "sudo {{ learner_portal_nodeenv_bin }}/node {{ learner_portal_nodeenv_bin }}/npm i --unsafe-perm" + args: + chdir: "{{ learner_portal_code_dir }}" + environment: "{{ learner_portal_env_vars }}" + become_user: "{{ learner_portal_user }}" + tags: + - install + - install:app-requirements + +# install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json +- name: Run learner_portal build + shell: "npm run build" + args: + chdir: "{{ learner_portal_code_dir }}" + environment: "{{ learner_portal_env_vars }}" + become_user: "{{ learner_portal_user }}" + tags: + - install + - install:app-requirements diff --git a/playbooks/roles/local_dev/defaults/main.yml b/playbooks/roles/local_dev/defaults/main.yml index 4c92b9ec956..28a8f38e977 100644 --- a/playbooks/roles/local_dev/defaults/main.yml +++ b/playbooks/roles/local_dev/defaults/main.yml @@ -70,10 +70,22 @@ localdev_accounts: } - { - user: "{{ journals_user|default('None') }}", - home: "{{ journals_home|default('None') }}", - env: "journals_env", - repo: "journals" + user: "{{ registrar_user|default('None') }}", + home: "{{ registrar_home|default('None') }}", + env: "registrar_env", + repo: "registrar" + } + - { + user: "{{ 
learner_portal_user|default('None') }}", + home: "{{ learner_portal_home|default('None') }}", + env: "learner_portal_env", + repo: "learner_portal" + } + - { + user: "{{ program_manager_user|default('None') }}", + home: "{{ program_manager_home|default('None') }}", + env: "program_manager_env", + repo: "program_manager" } # Helpful system packages for local dev @@ -84,4 +96,5 @@ local_dev_pkgs: - openbox - libffi-dev -localdev_jscover_version: "1.0.2" +LOCALDEV_JSCOVER_VERSION: "1.0.2" +localdev_jscover_version: "{{ LOCALDEV_JSCOVER_VERSION }}" diff --git a/playbooks/roles/local_dev/files/paver_autocomplete.sh b/playbooks/roles/local_dev/files/paver_autocomplete.sh deleted file mode 100644 index 085334a7ec9..00000000000 --- a/playbooks/roles/local_dev/files/paver_autocomplete.sh +++ /dev/null @@ -1,139 +0,0 @@ -# Courtesy of Gregory Nicholas - -_subcommand_opts() -{ - local awkfile command cur usage - command=$1 - cur=${COMP_WORDS[COMP_CWORD]} - awkfile=/tmp/paver-option-awkscript-$$.awk - echo ' -BEGIN { - opts = ""; -} - -{ - for (i = 1; i <= NF; i = i + 1) { - # Match short options (-a, -S, -3) - # or long options (--long-option, --another_option) - # in output from paver help [subcommand] - if ($i ~ /^(-[A-Za-z0-9]|--[A-Za-z][A-Za-z0-9_-]*)/) { - opt = $i; - # remove trailing , and = characters. 
- match(opt, "[,=]"); - if (RSTART > 0) { - opt = substr(opt, 0, RSTART); - } - opts = opts " " opt; - } - } -} - -END { - print opts -}' > $awkfile - - usage=`paver help $command` - options=`echo "$usage"|awk -f $awkfile` - - COMPREPLY=( $(compgen -W "$options" -- "$cur") ) -} - - -_paver() -{ - local cur prev - COMPREPLY=() - # Variable to hold the current word - cur="${COMP_WORDS[COMP_CWORD]}" - prev="${COMP_WORDS[COMP_CWORD - 1]}" - - # Build a list of the available tasks from: `paver --help --quiet` - local cmds=$(paver -hq | awk '/^ ([a-zA-Z][a-zA-Z0-9_]+)/ {print $1}') - - subcmd="${COMP_WORDS[1]}" - # Generate possible matches and store them in the - # array variable COMPREPLY - - if [[ -n $subcmd ]] - then - case $subcmd in - test_system) - - _test_system_args - if [[ -n $COMPREPLY ]] - then - return 0 - fi - ;; - test_bokchoy) - _test_bokchoy_args - if [[ -n $COMPREPLY ]] - then - return 0 - fi - ;; - *) - ;; - esac - - if [[ ${#COMP_WORDS[*]} == 3 ]] - then - _subcommand_opts $subcmd - return 0 - else - if [[ "$cur" == -* ]] - then - _subcommand_opts $subcmd - return 0 - else - COMPREPLY=( $(compgen -o nospace -- "$cur") ) - fi - fi - fi - - if [[ ${#COMP_WORDS[*]} == 2 ]] - then - COMPREPLY=( $(compgen -W "${cmds}" -- "$cur") ) - fi -} - -_test_system_args() -{ - local cur prev - cur="${COMP_WORDS[COMP_CWORD]}" - prev="${COMP_WORDS[COMP_CWORD - 1]}" - - case "$prev" in - -s|--system) - COMPREPLY=( $(compgen -W "lms cms" -- "$cur") ) - return 0 - ;; - *) - ;; - esac -} - -_test_bokchoy_args() -{ - local bokchoy_tests cur prev - cur="${COMP_WORDS[COMP_CWORD]}" - prev="${COMP_WORDS[COMP_CWORD - 1]}" - - case "$prev" in - -d|--test_dir) - bokchoy_tests=`find common/test/acceptance -name \*.py| sed 's:common/test/acceptance/::'` - COMPREPLY=( $(compgen -o filenames -W "$bokchoy_tests" -- $cur) ) - return 0 - ;; - -t|--test_spec) - bokchoy_tests=`find common/test/acceptance/tests -name \*.py| sed 's:common/test/acceptance/::'` - COMPREPLY=( $(compgen -o 
filenames -W "$bokchoy_tests" -- $cur) ) - return 0 - ;; - *) - ;; - esac -} -# Assign the auto-completion function for our command. - -complete -F _paver -o default paver diff --git a/playbooks/roles/local_dev/tasks/main.yml b/playbooks/roles/local_dev/tasks/main.yml index 94426f9c510..4ee37052fbb 100644 --- a/playbooks/roles/local_dev/tasks/main.yml +++ b/playbooks/roles/local_dev/tasks/main.yml @@ -82,19 +82,7 @@ with_items: "{{ localdev_accounts }}" when: item.user != 'None' -# Create scripts to add paver autocomplete -- name: Add paver autocomplete - copy: - src: paver_autocomplete.sh - dest: "{{ item.home }}/.bashrc.d/paver_autocomplete.sh" - owner: "{{ item.user }}" - group: "{{ common_web_group }}" - mode: "0755" - with_items: "{{ localdev_accounts }}" - when: item.user != 'None' - ignore_errors: yes - -# Add useful vimrc files +# Add useful vimrc files - name: Create .vim/plugin directory file: path: "{{ item.home }}/.vim/ftplugin" diff --git a/playbooks/roles/locust/meta/main.yml b/playbooks/roles/locust/meta/main.yml index 71b4848235b..2807f88c073 100644 --- a/playbooks/roles/locust/meta/main.yml +++ b/playbooks/roles/locust/meta/main.yml @@ -13,13 +13,13 @@ dependencies: - common - loadtest_driver - - role: edx_service - edx_service_name: "{{ locust_service_name }}" - edx_service_config: "{{ LOCUST_SERVICE_CONFIG }}" - edx_service_repos: "{{ LOCUST_REPOS }}" - edx_service_user: "{{ locust_user }}" - edx_service_home: "{{ locust_home }}" - edx_service_packages: + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ locust_service_name }}" + edx_service_with_rendered_config_service_config: "{{ LOCUST_SERVICE_CONFIG }}" + edx_service_with_rendered_config_repos: "{{ LOCUST_REPOS }}" + edx_service_with_rendered_config_user: "{{ locust_user }}" + edx_service_with_rendered_config_home: "{{ locust_home }}" + edx_service_with_rendered_config_packages: debian: "{{ locust_debian_pkgs }}" redhat: "{{ locust_redhat_pkgs }}" 
diff --git a/playbooks/roles/mariadb/defaults/main.yml b/playbooks/roles/mariadb/defaults/main.yml index b2cb284c7b0..722c824edac 100644 --- a/playbooks/roles/mariadb/defaults/main.yml +++ b/playbooks/roles/mariadb/defaults/main.yml @@ -75,14 +75,14 @@ MARIADB_DATABASES: db: "{{ DISCOVERY_DEFAULT_DB_NAME | default(None) }}", encoding: "utf8" } - - { - db: "{{ JOURNALS_DEFAULT_DB_NAME | default(None) }}", - encoding: "utf8" - } - { db: "{{ HIVE_METASTORE_DATABASE_NAME | default(None) }}", encoding: "latin1" } + - { + db: "{{ BLOCKSTORE_DEFAULT_DB_NAME | default(None) }}", + encoding: "utf8" + } MARIADB_USERS: - { @@ -137,9 +137,9 @@ MARIADB_USERS: pass: "{{ DISCOVERY_MYSQL_PASSWORD | default(None) }}" } - { - db: "{{ JOURNALS_DEFAULT_DB_NAME | default(None) }}", - user: "{{ JOURNALS_MYSQL_USER | default(None) }}", - pass: "{{ JOURNALS_MYSQL_PASSWORD | default(None) }}" + db: "{{ BLOCKSTORE_DEFAULT_DB_NAME | default(None) }}", + user: "{{ BLOCKSTORE_DATABASE_USER | default(None) }}", + pass: "{{ BLOCKSTORE_DATABASE_PASSWORD | default(None) }}" } # diff --git a/playbooks/roles/minos/defaults/main.yml b/playbooks/roles/minos/defaults/main.yml index ae3d4bc3be8..b2772b24667 100644 --- a/playbooks/roles/minos/defaults/main.yml +++ b/playbooks/roles/minos/defaults/main.yml @@ -9,7 +9,7 @@ # ## # Defaults for role minos -# +# MINOS_GIT_IDENTITY: !!null MINOS_SERVICE_CONFIG: @@ -33,9 +33,10 @@ minos_git_ssh: "/tmp/git.sh" minos_git_identity: "{{ minos_app_dir }}/minos-git-identity" minos_edx_server_tools_repo: "git@github.com/edx-ops/edx-minos.git" minos_edx_server_tools_branch: "release" -minos_edx_server_tools_version: "0.4" +MINOS_EDX_SERVER_TOOLS_VERSION: "0.4" +minos_edx_server_tools_version: "{{ MINOS_EDX_SERVER_TOOLS_VERSION }}" minos_requirement: "git+ssh://{{ minos_edx_server_tools_repo }}@{{ minos_edx_server_tools_branch }}#egg=edx-minos" - + # # OS packages # diff --git a/playbooks/roles/minos/meta/main.yml b/playbooks/roles/minos/meta/main.yml index 
4d14993de1c..e73b1272d1f 100644 --- a/playbooks/roles/minos/meta/main.yml +++ b/playbooks/roles/minos/meta/main.yml @@ -9,21 +9,21 @@ # ## # Role includes for role minos -# +# # Example: # # dependencies: # - { -# role: my_role +# role: my_role # my_role_var0: "foo" # my_role_var1: "bar" # } dependencies: - - role: edx_service - edx_service_name: "{{ minos_service_name }}" - edx_service_config: "{{ MINOS_SERVICE_CONFIG }}" - edx_service_user: root - edx_service_home: "{{ minos_app_dir }}" - edx_service_packages: + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ minos_service_name }}" + edx_service_with_rendered_config_service_config: "{{ MINOS_SERVICE_CONFIG }}" + edx_service_with_rendered_config_user: root + edx_service_with_rendered_config_home: "{{ minos_app_dir }}" + edx_service_with_rendered_config_packages: debian: "{{ minos_debian_pkgs }}" - redhat: "{{ minos_redhat_pkgs }}" \ No newline at end of file + redhat: "{{ minos_redhat_pkgs }}" diff --git a/playbooks/roles/minos/tasks/main.yml b/playbooks/roles/minos/tasks/main.yml index 4626a347baf..bdc2ae2d6f1 100644 --- a/playbooks/roles/minos/tasks/main.yml +++ b/playbooks/roles/minos/tasks/main.yml @@ -10,7 +10,7 @@ # # # Tasks for role minos -# +# # Overview: # # Install the, currently private, minos application @@ -20,7 +20,7 @@ # Dependencies: # # Relies on the common role. 
-# +# # Example play: # # - name: Deploy minos @@ -31,7 +31,7 @@ # COMMON_ENABLE_MINOS: True # roles: # - common -# - minos +# - minos # - name: Create minos config directory @@ -41,7 +41,7 @@ owner: root group: root mode: "0755" - + - name: Create minos voters configs template: dest: "{{ minos_voter_cfg }}/{{ item }}.yml" diff --git a/playbooks/roles/mongo_2_6/defaults/main.yml b/playbooks/roles/mongo_2_6/defaults/main.yml index 95017667a4a..250ab16f333 100644 --- a/playbooks/roles/mongo_2_6/defaults/main.yml +++ b/playbooks/roles/mongo_2_6/defaults/main.yml @@ -1,11 +1,13 @@ mongo_logappend: true -mongo_version: 2.6.5 +MONGO_VERSION: 2.6.5 +mongo_version: "{{ MONGO_VERSION }}" mongo_port: "27017" mongo_extra_conf: '' mongo_key_file: '/etc/mongodb_key' mongo_repl_set: rs0 mongo_cluster_members: [] -pymongo_version: 2.7.2 +PYMONGO_VERSION: 2.7.2 +pymongo_version: "{{ PYMONGO_VERSION }}" mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" diff --git a/playbooks/roles/mongo_2_6/tasks/main.yml b/playbooks/roles/mongo_2_6/tasks/main.yml index 1e1672b54b0..7ff19ef8558 100644 --- a/playbooks/roles/mongo_2_6/tasks/main.yml +++ b/playbooks/roles/mongo_2_6/tasks/main.yml @@ -233,10 +233,9 @@ - manage:app-users - name: Install s3cmd - pip: + apt: name: "s3cmd" state: present - extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" when: MONGO_S3_BACKUP tags: - install diff --git a/playbooks/roles/mongo_3_0/defaults/main.yml b/playbooks/roles/mongo_3_0/defaults/main.yml index 5666d49e8ea..c9403c2f57c 100644 --- a/playbooks/roles/mongo_3_0/defaults/main.yml +++ b/playbooks/roles/mongo_3_0/defaults/main.yml @@ -3,11 +3,13 @@ mongo_logappend: true #This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle mongo_logrotate: reopen -mongo_version: 3.0.14 +MONGO_VERSION: 3.0.14 +mongo_version: "{{ MONGO_VERSION }}" mongo_port: "27017" mongo_extra_conf: '' mongo_key_file: '/etc/mongodb_key' -pymongo_version: 2.8.1 
+PYMONGO_VERSION: 2.8.1 +pymongo_version: "{{ PYMONGO_VERSION }}" mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" diff --git a/playbooks/roles/mongo_3_2/defaults/main.yml b/playbooks/roles/mongo_3_2/defaults/main.yml index c81d8121bc2..e7b1e520cbc 100644 --- a/playbooks/roles/mongo_3_2/defaults/main.yml +++ b/playbooks/roles/mongo_3_2/defaults/main.yml @@ -3,22 +3,24 @@ mongo_logappend: true #This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle mongo_logrotate: reopen -mongo_version: 3.2.16 +MONGO_VERSION_MAJOR_MINOR: "3.2" +mongo_version_major_minor: "{{ MONGO_VERSION_MAJOR_MINOR }}" +MONGO_VERSION_PATCH: "16" +mongo_version_patch: "{{ MONGO_VERSION_PATCH }}" +PYMONGO_VERSION: "3.2.2" +pymongo_version: "{{ PYMONGO_VERSION }}" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +mongo_version: "{{ MONGO_VERSION }}" mongo_port: "27017" mongo_extra_conf: '' mongo_key_file: '/etc/mongodb_key' -pymongo_version: 3.2.2 mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" mongo_user: mongodb -MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/3.2 multiverse" -# Key id taken from https://docs.mongodb.com/v3.2/tutorial/install-mongodb-on-ubuntu/ -# Changes with each major mongo release, so must be updated with Mongo upgrade -MONGODB_APT_KEY: "EA312927" -MONGODB_APT_KEYSERVER: "keyserver.ubuntu.com" +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ mongo_version_major_minor }} multiverse" mongodb_debian_pkgs: - "mongodb-org={{ mongo_version }}" @@ -86,3 +88,28 @@ mongo_dbpath: "{{ mongo_data_dir }}/mongodb" mongo_enable_journal: true MONGO_LOG_SERVERSTATUS: true + +# Vars for configuring a mongo backup node. 
If enabled, this node will be provisioned with a script that uses mongodump +# to backup the database to an ebs volume at a period set by mongo_backup_cron. +# Set MONGO_BACKUP_ENABLED to true to enable. If enabled, all the other MONGO_BACKUP_ vars must be set according to your +# setup. +MONGO_BACKUP_ENABLED: false +MONGO_BACKUP_NODE: "" # note: most likely the ip address of the instance on which to perform the backups +MONGO_BACKUP_EBS_VOLUME_DEVICE: "" +MONGO_BACKUP_EBS_VOLUME_ID: "" +MONGO_BACKUP_AUTH_DATABASE: "" +MONGO_BACKUP_PRUNE_OLDER_THAN_DATE: "" # passed to `date -d`; should be a relative date like "-30days" +MONGO_BACKUP_SNITCH_URL: "" # Optional URL that will be used to ping a monitoring service (such as Dead Man's Snitch) upon successful completion of a backup. +MONGO_BACKUP_VOLUME_MOUNT_PATH: "/mnt/mongo-backup" +MONGO_BACKUP_SNAPSHOT_DESC: "mongo-backup" +mongo_backup_script_path: "/usr/local/sbin/backup-mongo.sh" +mongo_backup_cron: + minute: '12' + hour: '*/12' + day: '*' + month: '*' + weekday: '*' + +# Internal variable set to true dynamically if backups enabled and playbook running on MONGO_BACKUP_NODE. Do not +# manually override. 
+is_backup_node: false diff --git a/playbooks/roles/mongo_3_2/tasks/main.yml b/playbooks/roles/mongo_3_2/tasks/main.yml index 19731ddf009..2472e7bcceb 100644 --- a/playbooks/roles/mongo_3_2/tasks/main.yml +++ b/playbooks/roles/mongo_3_2/tasks/main.yml @@ -33,8 +33,7 @@ - name: add the mongodb signing key apt_key: - id: "{{ MONGODB_APT_KEY }}" - keyserver: "{{ MONGODB_APT_KEYSERVER }}" + url: "https://www.mongodb.org/static/pgp/server-{{ mongo_version_major_minor }}.asc" state: present retries: 3 tags: @@ -57,6 +56,7 @@ install_recommends: yes force: yes update_cache: yes + register: install_mongo_package with_items: "{{ mongodb_debian_pkgs }}" tags: - "install" @@ -78,29 +78,6 @@ - "install" - "install:app-configuration" -- name: add serverStatus logging script - template: - src: "log-mongo-serverStatus.sh.j2" - dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh" - owner: "{{ mongo_user }}" - group: "{{ mongo_user }}" - mode: 0700 - when: MONGO_LOG_SERVERSTATUS - tags: - - "install" - - "install:app-configuration" - -- name: add serverStatus logging script to cron - cron: - name: mongostat logging job - minute: "*/3" - job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1 - become: yes - when: MONGO_LOG_SERVERSTATUS - tags: - - "install" - - "install:app-configuration" - # This will error when run on a new replica set, so we ignore_errors # and connect anonymously next. - name: determine if there is a replica set already @@ -187,20 +164,85 @@ - "manage:db-replication" - "update_mongod_conf" +# This sets the is_backup_node var by checking whether +# mongo backups are enabled AND we're currently running against the designated mongo backup node. +# This allows backup-related tasks below to determine whether or not they should run on the current mongo node. 
+- name: determine if backup tasks should run + set_fact: + is_backup_node: true + when: MONGO_BACKUP_ENABLED and '{{ ansible_default_ipv4.address|default(ansible_all_ipv4_addresses[0]) }}' == '{{ MONGO_BACKUP_NODE }}' + tags: + - "backup:mongo" + - name: install logrotate configuration template: src: mongo_logrotate.j2 dest: /etc/logrotate.d/hourly/mongo tags: + - "backup:mongo" - "install" - "install:app-configuration" - "logrotate" -- name: restart mongo service if we changed our configuration +- name: install prereqs for backup script + apt: + pkg: "{{ item }}" + state: present + update_cache: yes + with_items: + - jq + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install backup script + template: + src: backup-mongo.sh.j2 + dest: "{{ mongo_backup_script_path }}" + mode: 0700 + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: add mongo backup script to cron + cron: + name: mongo backup job + minute: "{{ mongo_backup_cron.minute | default('12') }}" + hour: "{{ mongo_backup_cron.hour | default('*/12') }}" + day: "{{ mongo_backup_cron.day | default('*') }}" + month: "{{ mongo_backup_cron.month | default('*') }}" + weekday: "{{ mongo_backup_cron.weekday | default('*') }}" + job: "{{ mongo_backup_script_path }} >> {{ mongo_log_dir }}/mongo-backup.log 2>&1" + become: yes + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: format mongo backup volume + filesystem: + dev: "{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}" + fstype: ext4 + force: true + ignore_errors: true + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: restart mongo service if we changed our configuration or upgraded mongo service: name: mongod state: restarted - when: update_mongod_conf.changed or update_mongod_key.changed + when: update_mongod_conf.changed or update_mongod_key.changed or install_mongo_package.changed tags: - "manage" - 
"manage:start" @@ -335,3 +377,26 @@ tags: - "manage" - "manage:start" + +- name: add serverStatus logging script + template: + src: "log-mongo-serverStatus.sh.j2" + dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh" + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + mode: 0700 + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:app-configuration" + +- name: add serverStatus logging script to cron + cron: + name: mongostat logging job + minute: "*/3" + job: /edx/bin/log-mongo-serverStatus.sh >> {{ mongo_log_dir }}/serverStatus.log 2>&1 + become: yes + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:app-configuration" diff --git a/playbooks/roles/mongo_3_2/templates/backup-mongo.sh.j2 b/playbooks/roles/mongo_3_2/templates/backup-mongo.sh.j2 new file mode 100644 index 00000000000..61ea25d005d --- /dev/null +++ b/playbooks/roles/mongo_3_2/templates/backup-mongo.sh.j2 @@ -0,0 +1,133 @@ +#!/bin/bash +# ref https://tasks.opencraft.com/browse/SE-1669 +# Script to perform a point-in-time dump of the local mongodb database using +# mongodump. +# includes locking (prevent this script from running multiple times in +# parallel), creating a snapshot of the volume used to backup to + +# exit by default on failure +set -e +# verbose for help with debugging +set -x + +# make sure local/bin is in the path so we can use aws cli +PATH="$PATH:/usr/local/bin" + +# vars set by ansible +MONGO_BACKUP_EBS_VOLUME_DEVICE="{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}" # eg. /dev/svdk or /dev/disk/by-label/mylabel +MONGO_BACKUP_EBS_VOLUME_ID="{{ MONGO_BACKUP_EBS_VOLUME_ID }}" # eg. vol-123456 +MONGO_BACKUP_VOLUME_MOUNT_PATH="{{ MONGO_BACKUP_VOLUME_MOUNT_PATH }}" # eg. 
/mnt/mongobackup/ +MONGO_BACKUP_NODE="{{ MONGO_BACKUP_NODE }}" +EDXAPP_MONGO_DB_NAME="{{ EDXAPP_MONGO_DB_NAME }}" +MONGO_BACKUP_AUTH_DATABASE="{{ MONGO_BACKUP_AUTH_DATABASE }}" +MONGO_ADMIN_USER="{{ MONGO_ADMIN_USER }}" +MONGO_ADMIN_PASSWORD="{{ MONGO_ADMIN_PASSWORD }}" +AWS_ACCESS_KEY_ID="{{ MONGO_BACKUP_AWS_ACCESS_KEY_ID }}" +AWS_SECRET_ACCESS_KEY="{{ MONGO_BACKUP_AWS_SECRET_ACCESS_KEY }}" +MONGO_BACKUP_SNAPSHOT_DESC="{{ MONGO_BACKUP_SNAPSHOT_DESC }}" +MONGO_BACKUP_PRUNE_OLDER_THAN_DATE="{{ MONGO_BACKUP_PRUNE_OLDER_THAN_DATE }}" +MONGO_BACKUP_SNITCH_URL="{{ MONGO_BACKUP_SNITCH_URL }}" +aws_region="{{ aws_region }}" + +# export to make available to aws cli +export AWS_ACCESS_KEY_ID +export AWS_SECRET_ACCESS_KEY + +# other vars +archive_path="mongo-backup-$(date --iso-8601=minutes --utc)" + +# verify required variables are set +required() { + if [ -z "$1" ]; then + echo "$2" + required_var_missing="yes" + fi +} +required "$MONGO_BACKUP_EBS_VOLUME_DEVICE" "MONGO_BACKUP_EBS_VOLUME_DEVICE missing; EBS volume device path is required" +required "$MONGO_BACKUP_EBS_VOLUME_ID" "MONGO_BACKUP_EBS_VOLUME_ID missing; EBS volume id is required" +required "$MONGO_BACKUP_VOLUME_MOUNT_PATH" "MONGO_BACKUP_VOLUME_MOUNT_PATH missing; path on which to mount ebs backup volume is required" +required "$MONGO_BACKUP_NODE" "MONGO_BACKUP_NODE missing; this must be set to determine if this is the correct node to run backup on" +required "$MONGO_BACKUP_AUTH_DATABASE" "MONGO_BACKUP_AUTH_DATABASE missing; this must be set to use the correct authenticationDatabase to auth against" +required "$MONGO_ADMIN_USER" "MONGO_ADMIN_USER missing; this must be set to auth against the database" +required "$MONGO_ADMIN_PASSWORD" "MONGO_ADMIN_PASSWORD missing; this must be set to auth against the database" +required "$AWS_ACCESS_KEY_ID" "MONGO_BACKUP_AWS_ACCESS_KEY_ID missing; this must be set to auth against the database" +required "$AWS_SECRET_ACCESS_KEY" "MONGO_BACKUP_AWS_SECRET_ACCESS_KEY missing; this 
must be set to auth against the database" +required "$aws_region" "aws_region missing; this must be set to use awscli" +[ -n "$required_var_missing" ] && exit 1 + + +# only run on specified node - this pulls the node name (ip address) of the mongo db on this instance +mynodename=$(echo "db.isMaster()" | mongo -u "$MONGO_ADMIN_USER" -p"$MONGO_ADMIN_PASSWORD" --authenticationDatabase "$MONGO_BACKUP_AUTH_DATABASE" "$EDXAPP_MONGO_DB_NAME" | grep \"me\" | cut -f 2 -d ':' | sed -e 's/ //' -e 's/,//' -e 's/"//'); +if [ "$mynodename" != "$MONGO_BACKUP_NODE" ]; then + echo "This is not the backup host. Run on a different instance." + exit 1 +fi + +# Acquire backup lock using this script itself as the lockfile. If another +# backup task is already running, then exit immediately. +exec 200<$0 +flock -n 200 || { echo "Another backup task is already running."; exit 1; } + +echo "Starting at $(date)" + +# ensure volume is mounted +mkdir -p "$MONGO_BACKUP_VOLUME_MOUNT_PATH" +if ! mountpoint -q "$MONGO_BACKUP_VOLUME_MOUNT_PATH"; then + mount -o discard,defaults,noatime "$MONGO_BACKUP_EBS_VOLUME_DEVICE" "$MONGO_BACKUP_VOLUME_MOUNT_PATH" +fi + +# Clean old backup files to save space and so we start afresh. +rm -rf "$MONGO_BACKUP_VOLUME_MOUNT_PATH/mongo/" +mkdir -p "$MONGO_BACKUP_VOLUME_MOUNT_PATH/mongo/" + + +# create the dump +# XXX: we may want to check how this lays out of disk and how it will play against ebs volume snapshots. The idea was +# that snapshots would be cheap because you only pay for data blocks that have changed between one snapshot and the +# next. however, if the mongodump -> gzip process ends up not being consistent in layout, +# this may end up with a lot of the disk content changing between snapshots. 
+mongodump --host="localhost" --oplog --gzip -u "$MONGO_ADMIN_USER" --password "$MONGO_ADMIN_PASSWORD" --authenticationDatabase "$MONGO_BACKUP_AUTH_DATABASE" --out="$MONGO_BACKUP_VOLUME_MOUNT_PATH/mongo/$archive_path" + + +# flush everything to disk, and unmount the volume ready to snapshot +sync +umount "$MONGO_BACKUP_VOLUME_MOUNT_PATH" + +# create a snapshot of the volume +snapshot_data=$(aws --region "$aws_region" ec2 create-snapshot --volume-id "$MONGO_BACKUP_EBS_VOLUME_ID" --description "$MONGO_BACKUP_SNAPSHOT_DESC") +echo "$snapshot_data" +snapshot_id="$(echo "$snapshot_data" | jq -r .SnapshotId)" + +# Poll until the snapshot has been created. We want to block here to avoid the chance of this script being run (and the +# current backup deleted / a new backup created) while the snapshot is taking place. The snapshot must also be done +# while the volume is unmounted to ensure data integrity. +while true; do + sleep 60 + snapshot_data=$(aws --region "$aws_region" ec2 describe-snapshots --snapshot-ids "$snapshot_id" || true) + if [ "$(echo "$snapshot_data" | jq -r '.Snapshots[0].State')" = "completed" ]; then + break + fi +done + +if [ -n "$MONGO_BACKUP_PRUNE_OLDER_THAN_DATE" ]; then + # Prune old snapshots + old_snapshot_data="$(aws --region "$aws_region" ec2 describe-snapshots --filters "Name=description,Values=$MONGO_BACKUP_SNAPSHOT_DESC")" + lines_="$(echo "$old_snapshot_data" | jq -r ".Snapshots | map(\"\(.SnapshotId) \(.StartTime)\") | .[]")" + earliest_date="$(date -d "$MONGO_BACKUP_PRUNE_OLDER_THAN_DATE" "+%s")" + while read -r line; do + # each $line looks like: "snap-0123456789DEADBEEF 2019-11-01T00:15:12.492Z" + snapshot_id=$(echo "$line" | cut -f1 -d' ') + timestamp="$(date -d "$(echo "$line" | cut -f2 -d' ')" "+%s")" + if [ "$timestamp" -lt "$earliest_date" ]; then + # this snapshot_id is older than we want to keep around, so delete it + aws --region "$aws_region" ec2 delete-snapshot --snapshot-id "$snapshot_id" + fi + done <<< "$lines_" +fi + +# 
ping the snitch url if available +if [ -n "$MONGO_BACKUP_SNITCH_URL" ]; then + curl "$MONGO_BACKUP_SNITCH_URL" +fi + +echo "End at $(date)" diff --git a/playbooks/roles/mongo_3_2/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_3_2/templates/mongo_logrotate.j2 index 7086a9a96ff..f2fb4483566 100644 --- a/playbooks/roles/mongo_3_2/templates/mongo_logrotate.j2 +++ b/playbooks/roles/mongo_3_2/templates/mongo_logrotate.j2 @@ -12,6 +12,22 @@ size 1M } +{% if is_backup_node %} +{{ mongo_log_dir }}/mongo-backup.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} +{% endif %} + {{ mongo_log_dir }}/mongodb.log { create compress diff --git a/playbooks/roles/mongo_3_4/defaults/main.yml b/playbooks/roles/mongo_3_4/defaults/main.yml new file mode 100644 index 00000000000..52f57ae7323 --- /dev/null +++ b/playbooks/roles/mongo_3_4/defaults/main.yml @@ -0,0 +1,115 @@ +mongo_logappend: true + +#This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle +mongo_logrotate: reopen + +MONGO_VERSION_MAJOR_MINOR: "3.4" +mongo_version_major_minor: "{{ MONGO_VERSION_MAJOR_MINOR }}" +MONGO_VERSION_PATCH: "24" +mongo_version_patch: "{{ MONGO_VERSION_PATCH }}" +PYMONGO_VERSION: "3.4.0" +pymongo_version: "{{ PYMONGO_VERSION }}" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +mongo_version: "{{ MONGO_VERSION }}" +mongo_port: "27017" +mongo_extra_conf: '' +mongo_key_file: '/etc/mongodb_key' + +mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" +mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" +mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" +mongo_user: mongodb + +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ mongo_version_major_minor }} multiverse" + +mongodb_debian_pkgs: + - "mongodb-org={{ mongo_version }}" + - "mongodb-org-server={{ mongo_version }}" + - "mongodb-org-shell={{ 
mongo_version }}" + - "mongodb-org-mongos={{ mongo_version }}" + - "mongodb-org-tools={{ mongo_version }}" + + + +mongo_configure_replica_set: true + +# Vars Meant to be overridden +MONGO_ADMIN_USER: 'admin' +MONGO_ADMIN_PASSWORD: 'password' +MONGO_USERS: + - user: cs_comments_service + password: password + database: cs_comments_service + roles: readWrite + - user: edxapp + password: password + database: edxapp + roles: readWrite + +# This default setting is approriate for a single machine installation +# This will need to be overridden for setups where mongo is on its own server +# and/or you are configuring mongo replication. If the override value is +# 0.0.0.0 mongo will listen on all IPs. The value may also be set to a +# specific IP. +MONGO_BIND_IP: 127.0.0.1 + +MONGO_REPL_SET: "rs0" +MONGO_AUTH: true + +MONGO_CLUSTER_KEY: "CHANGEME" + +# Cluster member configuration +# Fed directly into mongodb_replica_set module +MONGO_RS_CONFIG: + _id: '{{ MONGO_REPL_SET }}' + members: + - host: '127.0.0.1' + +# Storage engine options in 3.2: "mmapv1" or "wiredTiger" +# 3.2 and 3.4 default to wiredTiger +MONGO_STORAGE_ENGINE: "wiredTiger" + +# List of dictionaries as described in the mount_ebs role's default +# for the volumes. +# Useful if you want to store your mongo data and/or journal on separate +# disks from the root volume. By default, they will end up mongo_data_dir +# on the root disk. +MONGO_VOLUMES: [] + +# WiredTiger takes a number of optional configuration settings +# which can be defined as a yaml structure in your secure configuration. +MONGO_STORAGE_ENGINE_OPTIONS: !!null + +mongo_logpath: "{{ mongo_log_dir }}/mongodb.log" +mongo_dbpath: "{{ mongo_data_dir }}/mongodb" + +# In environments that do not require durability (devstack / Jenkins) +# you can disable the journal to reduce disk usage +mongo_enable_journal: true + +MONGO_LOG_SERVERSTATUS: true + +# Vars for configuring a mongo backup node. 
If enabled, this node will be provisioned with a script that uses mongodump +# to backup the database to an ebs volume at a period set by mongo_backup_cron. +# Set MONGO_BACKUP_ENABLED to true to enable. If enabled, all the other MONGO_BACKUP_ vars must be set according to your +# setup. +MONGO_BACKUP_ENABLED: false +MONGO_BACKUP_NODE: "" # note: most likely the ip address of the instance on which to perform the backups +MONGO_BACKUP_EBS_VOLUME_DEVICE: "" +MONGO_BACKUP_EBS_VOLUME_ID: "" +MONGO_BACKUP_AUTH_DATABASE: "" +MONGO_BACKUP_PRUNE_OLDER_THAN_DATE: "" # passed to `date -d`; should be a relative date like "-30days" +MONGO_BACKUP_SNITCH_URL: "" # Optional URL that will be used to ping a monitoring service (such as Dead Man's Snitch) upon successful completion of a backup. +MONGO_BACKUP_VOLUME_MOUNT_PATH: "/mnt/mongo-backup" +MONGO_BACKUP_SNAPSHOT_DESC: "mongo-backup" +mongo_backup_script_path: "/usr/local/sbin/backup-mongo.sh" +mongo_backup_cron: + minute: '12' + hour: '*/12' + day: '*' + month: '*' + weekday: '*' + +# Internal variable set to true dynamically if backups enabled and playbook running on MONGO_BACKUP_NODE. Do not +# manually override. 
+is_backup_node: false diff --git a/playbooks/roles/mongo_3_4/files/etc/systemd/system/disable-transparent-hugepages.service b/playbooks/roles/mongo_3_4/files/etc/systemd/system/disable-transparent-hugepages.service new file mode 100644 index 00000000000..282c9e122c3 --- /dev/null +++ b/playbooks/roles/mongo_3_4/files/etc/systemd/system/disable-transparent-hugepages.service @@ -0,0 +1,11 @@ +[Unit] +Description="Disable Transparent Hugepage before MongoDB boots" +Before=mongod.service + +[Service] +Type=oneshot +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' + +[Install] +RequiredBy=mongod.service diff --git a/playbooks/roles/mongo_3_4/meta/main.yml b/playbooks/roles/mongo_3_4/meta/main.yml new file mode 100644 index 00000000000..d7223454526 --- /dev/null +++ b/playbooks/roles/mongo_3_4/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - common + - role: mount_ebs + volumes: "{{ MONGO_VOLUMES }}" diff --git a/playbooks/roles/mongo_3_4/tasks/main.yml b/playbooks/roles/mongo_3_4/tasks/main.yml new file mode 100644 index 00000000000..2472e7bcceb --- /dev/null +++ b/playbooks/roles/mongo_3_4/tasks/main.yml @@ -0,0 +1,402 @@ +--- +- name: Add disable transparent huge pages systemd service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + copy: + src: etc/systemd/system/disable-transparent-hugepages.service + dest: "/etc/systemd/system/disable-transparent-hugepages.service" + owner: root + group: root + mode: 0644 + tags: + - "hugepages" + - "install" + - "install:configuration" + +- name: Enable/start disable transparent huge pages service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + service: + name: disable-transparent-hugepages + enabled: yes + state: started + tags: + - "hugepages" + - "manage" + - "manage:start" + +- name: install python pymongo for mongo_user ansible module + pip: + name: pymongo + state: 
present + version: "{{ pymongo_version }}" + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + tags: + - "install" + - "install:app-requirements" + +- name: add the mongodb signing key + apt_key: + url: "https://www.mongodb.org/static/pgp/server-{{ mongo_version_major_minor }}.asc" + state: present + retries: 3 + tags: + - "install" + - "install:app-requirements" + +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO }}" + state: present + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + register: install_mongo_package + with_items: "{{ mongodb_debian_pkgs }}" + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: create mongo dirs + file: + path: "{{ item }}" + state: directory + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + with_items: + - "{{ mongo_data_dir }}" + - "{{ mongo_dbpath }}" + - "{{ mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - "install" + - "install:app-configuration" + +# This will error when run on a new replica set, so we ignore_errors +# and connect anonymously next. 
+- name: determine if there is a replica set already + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + run_once: true + register: authed_replica_set_already_configured + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +- name: Try checking the replica set with no user/pass in case this is a new box + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + run_once: true + register: unauthed_replica_set_already_configured + when: authed_replica_set_already_configured.failed is defined + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +# We use these in the templates but also to control a whole bunch of logic +- name: set facts that default to not initializing a replica set + set_fact: + initialize_replica_set: false + skip_replica_set: false + tags: + - "install" + - "install:app-configuration" + - "update_mongod_conf" + - "manage" + - "manage:db-replication" + +# If either auth or unauthed access comes back with a replica set, we +# do not want to initialize one. Since initialization requires a bunch +# of extra templating and restarting, it's not something we want to do on +# existing boxes. 
+- name: track if you have a replica set + set_fact: + initialize_replica_set: true + skip_replica_set: true + when: authed_replica_set_already_configured.status is not defined + and unauthed_replica_set_already_configured.status is not defined + tags: + - "manage" + - "manage:db-replication" + +- name: warn about unconfigured replica sets + debug: msg="You do not appear to have a Replica Set configured, deploying one for you" + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: copy mongodb key file + copy: + content: "{{ MONGO_CLUSTER_KEY }}" + dest: "{{ mongo_key_file }}" + mode: 0600 + owner: mongodb + group: mongodb + register: update_mongod_key + tags: + - "manage" + - "manage:db-replication" + - "mongodb_key" + +# If skip_replica_set is true, this template will not contain a replica set stanza +# because of the fact above. +- name: copy configuration template + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + register: update_mongod_conf + tags: + - "install" + - "install:app-configuration" + - "manage" + - "manage:db-replication" + - "update_mongod_conf" + +# This sets the is_backup_node var by checking whether +# mongo backups are enabled AND we're currently running against the designated mongo backup node. +# This allows backup-related tasks below to determine whether or not they should run on the current mongo node. 
+- name: determine if backup tasks should run + set_fact: + is_backup_node: true + when: MONGO_BACKUP_ENABLED and '{{ ansible_default_ipv4.address|default(ansible_all_ipv4_addresses[0]) }}' == '{{ MONGO_BACKUP_NODE }}' + tags: + - "backup:mongo" + +- name: install logrotate configuration + template: + src: mongo_logrotate.j2 + dest: /etc/logrotate.d/hourly/mongo + tags: + - "backup:mongo" + - "install" + - "install:app-configuration" + - "logrotate" + +- name: install prereqs for backup script + apt: + pkg: "{{ item }}" + state: present + update_cache: yes + with_items: + - jq + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install backup script + template: + src: backup-mongo.sh.j2 + dest: "{{ mongo_backup_script_path }}" + mode: 0700 + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: add mongo backup script to cron + cron: + name: mongo backup job + minute: "{{ mongo_backup_cron.minute | default('12') }}" + hour: "{{ mongo_backup_cron.hour | default('*/12') }}" + day: "{{ mongo_backup_cron.day | default('*') }}" + month: "{{ mongo_backup_cron.month | default('*') }}" + weekday: "{{ mongo_backup_cron.weekday | default('*') }}" + job: "{{ mongo_backup_script_path }} >> {{ mongo_log_dir }}/mongo-backup.log 2>&1" + become: yes + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: format mongo backup volume + filesystem: + dev: "{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}" + fstype: ext4 + force: true + ignore_errors: true + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: restart mongo service if we changed our configuration or upgraded mongo + service: + name: mongod + state: restarted + when: update_mongod_conf.changed or update_mongod_key.changed or install_mongo_package.changed + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 
27017 + delay: 2 + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +# We only try passwordless superuser creation when +# we're initializing the replica set and need to use +# the localhost exemption to create a user who will be +# able to initialize the replica set. +# We can only create the users on one machine, the one +# where we will initialize the replica set. If we +# create users on multiple hosts, then they will fail +# to come into the replica set. +- name: create super user + mongodb_user: + name: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + database: admin + roles: root + when: initialize_replica_set + run_once: true + tags: + - "manage" + - "manage:db-replication" + +# Now that the localhost exemption has been used to create the superuser, we need +# to add replica set to our configuration. This will never happen if we detected +# a replica set in the 'determine if there is a replica set already' task. +- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set + set_fact: + skip_replica_set: false + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: re-copy configuration template with replica set enabled + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: restart mongo service + service: + name: mongod + state: restarted + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: configure replica set + mongodb_replica_set: + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + rs_config: "{{ MONGO_RS_CONFIG }}" + run_once: true + register: replset_status + when: mongo_configure_replica_set + tags: + - "manage" + - 
"manage:db" + - "manage:db-replication" + - "manage:db-replication-configuration" + +# During initial replica set configuration, it can take a few seconds to vote +# a primary and for all members to reflect that status. During that window, +# use creation or other writes can fail. The best wait/check seems to be repeatedly +# checking the replica set status until we see a PRIMARY in the results. +- name: Wait for the replica set to update and (if needed) elect a primary + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + register: status + until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list + when: mongo_configure_replica_set + retries: 5 + delay: 2 + run_once: true + tags: + - "manage" + - "manage:db" + - "manage:db-replication" + +- name: create mongodb users in a replica set + mongodb_user: + database: "{{ item.database }}" + login_database: 'admin' + login_user: "{{ MONGO_ADMIN_USER }}" + login_password: "{{ MONGO_ADMIN_PASSWORD }}" + name: "{{ item.user }}" + password: "{{ item.password }}" + roles: "{{ item.roles }}" + state: present + replica_set: "{{ MONGO_REPL_SET }}" + with_items: "{{ MONGO_USERS }}" + run_once: true + when: mongo_configure_replica_set + tags: + - "manage" + - "manage:db" + - "manage:db-users" + - "manage:db-replication" + +- name: ensure mongo starts at boot time + service: + name: mongod + enabled: yes + tags: + - "manage" + - "manage:start" + +- name: add serverStatus logging script + template: + src: "log-mongo-serverStatus.sh.j2" + dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh" + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + mode: 0700 + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:app-configuration" + +- name: add serverStatus logging script to cron + cron: + name: mongostat logging job + minute: "*/3" + job: /edx/bin/log-mongo-serverStatus.sh >> {{ 
mongo_log_dir }}/serverStatus.log 2>&1 + become: yes + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:app-configuration" diff --git a/playbooks/roles/mongo_3_4/templates/log-mongo-serverStatus.sh.j2 b/playbooks/roles/mongo_3_4/templates/log-mongo-serverStatus.sh.j2 new file mode 100644 index 00000000000..04649d55ad1 --- /dev/null +++ b/playbooks/roles/mongo_3_4/templates/log-mongo-serverStatus.sh.j2 @@ -0,0 +1,3 @@ +#!/usr/bin/env bash +# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's weird non-compliant extended JSON +/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())' diff --git a/playbooks/roles/mongo_3_4/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_3_4/templates/mongo_logrotate.j2 new file mode 100644 index 00000000000..f2fb4483566 --- /dev/null +++ b/playbooks/roles/mongo_3_4/templates/mongo_logrotate.j2 @@ -0,0 +1,46 @@ +{{ mongo_log_dir }}/serverStatus.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} + +{% if is_backup_node %} +{{ mongo_log_dir }}/mongo-backup.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} +{% endif %} + +{{ mongo_log_dir }}/mongodb.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -USR1 mongod + endscript +} diff --git a/playbooks/roles/mongo_3_4/templates/mongod.conf.j2 b/playbooks/roles/mongo_3_4/templates/mongod.conf.j2 new file mode 100644 index 00000000000..b7d4b4a1efe --- /dev/null +++ b/playbooks/roles/mongo_3_4/templates/mongod.conf.j2 @@ -0,0 +1,46 @@ +# {{ ansible_managed }} +# mongodb.conf + + +storage: + # Where to store the data. 
+ dbPath: {{ mongo_dbpath }} + # Storage Engine + engine: {{ MONGO_STORAGE_ENGINE }} + # Enable journaling, http://www.mongodb.org/display/DOCS/Journaling + journal: +{% if mongo_enable_journal %} + enabled: true +{% else %} + enabled: false +{% endif %} +{% if MONGO_STORAGE_ENGINE_OPTIONS %} + {{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }} +{% endif %} + +systemLog: + #where to log + destination: file + path: "{{ mongo_logpath }}" +{% if mongo_logappend %} + logAppend: true +{% else %} + logAppend: false +{% endif %} + logRotate: {{ mongo_logrotate }} + +{% if not skip_replica_set %} +replication: + replSetName: {{ MONGO_REPL_SET }} + +security: + authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }} + keyFile: {{ mongo_key_file }} + +{% endif %} +net: + bindIp: {{ MONGO_BIND_IP }} + port: {{ mongo_port }} + + +{{ mongo_extra_conf }} diff --git a/playbooks/roles/mongo_3_6/defaults/main.yml b/playbooks/roles/mongo_3_6/defaults/main.yml new file mode 100644 index 00000000000..e401edb4c37 --- /dev/null +++ b/playbooks/roles/mongo_3_6/defaults/main.yml @@ -0,0 +1,115 @@ +mongo_logappend: true + +#This way, when mongod receives a SIGUSR1, it'll close and reopen its log file handle +mongo_logrotate: reopen + +MONGO_VERSION_MAJOR_MINOR: "3.6" +mongo_version_major_minor: "{{ MONGO_VERSION_MAJOR_MINOR }}" +MONGO_VERSION_PATCH: "17" +mongo_version_patch: "{{ MONGO_VERSION_PATCH }}" +PYMONGO_VERSION: "3.6.1" +pymongo_version: "{{ PYMONGO_VERSION }}" +MONGO_VERSION: "{{ MONGO_VERSION_MAJOR_MINOR }}.{{ MONGO_VERSION_PATCH }}" +mongo_version: "{{ MONGO_VERSION }}" +mongo_port: "27017" +mongo_extra_conf: '' +mongo_key_file: '/etc/mongodb_key' + +mongo_data_dir: "{{ COMMON_DATA_DIR }}/mongo" +mongo_log_dir: "{{ COMMON_LOG_DIR }}/mongo" +mongo_journal_dir: "{{ COMMON_DATA_DIR }}/mongo/mongodb/journal" +mongo_user: mongodb + +MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ mongo_version_major_minor }} multiverse" 
+ +mongodb_debian_pkgs: + - "mongodb-org={{ mongo_version }}" + - "mongodb-org-server={{ mongo_version }}" + - "mongodb-org-shell={{ mongo_version }}" + - "mongodb-org-mongos={{ mongo_version }}" + - "mongodb-org-tools={{ mongo_version }}" + + + +mongo_configure_replica_set: true + +# Vars Meant to be overridden +MONGO_ADMIN_USER: 'admin' +MONGO_ADMIN_PASSWORD: 'password' +MONGO_USERS: + - user: cs_comments_service + password: password + database: cs_comments_service + roles: readWrite + - user: edxapp + password: password + database: edxapp + roles: readWrite + +# This default setting is approriate for a single machine installation +# This will need to be overridden for setups where mongo is on its own server +# and/or you are configuring mongo replication. If the override value is +# 0.0.0.0 mongo will listen on all IPs. The value may also be set to a +# specific IP. +MONGO_BIND_IP: 127.0.0.1 + +MONGO_REPL_SET: "rs0" +MONGO_AUTH: true + +MONGO_CLUSTER_KEY: "CHANGEME" + +# Cluster member configuration +# Fed directly into mongodb_replica_set module +MONGO_RS_CONFIG: + _id: '{{ MONGO_REPL_SET }}' + members: + - host: '127.0.0.1' + +# Storage engine options in 3.2: "mmapv1" or "wiredTiger" +# 3.2 and 3.4 default to wiredTiger +MONGO_STORAGE_ENGINE: "wiredTiger" + +# List of dictionaries as described in the mount_ebs role's default +# for the volumes. +# Useful if you want to store your mongo data and/or journal on separate +# disks from the root volume. By default, they will end up mongo_data_dir +# on the root disk. +MONGO_VOLUMES: [] + +# WiredTiger takes a number of optional configuration settings +# which can be defined as a yaml structure in your secure configuration. 
+MONGO_STORAGE_ENGINE_OPTIONS: !!null + +mongo_logpath: "{{ mongo_log_dir }}/mongodb.log" +mongo_dbpath: "{{ mongo_data_dir }}/mongodb" + +# In environments that do not require durability (devstack / Jenkins) +# you can disable the journal to reduce disk usage +mongo_enable_journal: true + +MONGO_LOG_SERVERSTATUS: true + +# Vars for configuring a mongo backup node. If enabled, this node will be provisioned with a script that uses mongodump +# to backup the database to an ebs volume at a period set by mongo_backup_cron. +# Set MONGO_BACKUP_ENABLED to true to enable. If enabled, all the other MONGO_BACKUP_ vars must be set according to your +# setup. +MONGO_BACKUP_ENABLED: false +MONGO_BACKUP_NODE: "" # note: most likely the ip address of the instance on which to perform the backups +MONGO_BACKUP_EBS_VOLUME_DEVICE: "" +MONGO_BACKUP_EBS_VOLUME_ID: "" +MONGO_BACKUP_AUTH_DATABASE: "" +MONGO_BACKUP_PRUNE_OLDER_THAN_DATE: "" # passed to `date -d`; should be a relative date like "-30days" +MONGO_BACKUP_SNITCH_URL: "" # Optional URL that will be used to ping a monitoring service (such as Dead Man's Snitch) upon successful completion of a backup. +MONGO_BACKUP_VOLUME_MOUNT_PATH: "/mnt/mongo-backup" +MONGO_BACKUP_SNAPSHOT_DESC: "mongo-backup" +mongo_backup_script_path: "/usr/local/sbin/backup-mongo.sh" +mongo_backup_cron: + minute: '12' + hour: '*/12' + day: '*' + month: '*' + weekday: '*' + +# Internal variable set to true dynamically if backups enabled and playbook running on MONGO_BACKUP_NODE. Do not +# manually override. 
+is_backup_node: false diff --git a/playbooks/roles/mongo_3_6/files/etc/systemd/system/disable-transparent-hugepages.service b/playbooks/roles/mongo_3_6/files/etc/systemd/system/disable-transparent-hugepages.service new file mode 100644 index 00000000000..282c9e122c3 --- /dev/null +++ b/playbooks/roles/mongo_3_6/files/etc/systemd/system/disable-transparent-hugepages.service @@ -0,0 +1,11 @@ +[Unit] +Description="Disable Transparent Hugepage before MongoDB boots" +Before=mongod.service + +[Service] +Type=oneshot +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' +ExecStart=/bin/bash -c 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' + +[Install] +RequiredBy=mongod.service diff --git a/playbooks/roles/mongo_3_6/meta/main.yml b/playbooks/roles/mongo_3_6/meta/main.yml new file mode 100644 index 00000000000..d7223454526 --- /dev/null +++ b/playbooks/roles/mongo_3_6/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - common + - role: mount_ebs + volumes: "{{ MONGO_VOLUMES }}" diff --git a/playbooks/roles/mongo_3_6/tasks/main.yml b/playbooks/roles/mongo_3_6/tasks/main.yml new file mode 100644 index 00000000000..2472e7bcceb --- /dev/null +++ b/playbooks/roles/mongo_3_6/tasks/main.yml @@ -0,0 +1,402 @@ +--- +- name: Add disable transparent huge pages systemd service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + copy: + src: etc/systemd/system/disable-transparent-hugepages.service + dest: "/etc/systemd/system/disable-transparent-hugepages.service" + owner: root + group: root + mode: 0644 + tags: + - "hugepages" + - "install" + - "install:configuration" + +- name: Enable/start disable transparent huge pages service (http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/) + service: + name: disable-transparent-hugepages + enabled: yes + state: started + tags: + - "hugepages" + - "manage" + - "manage:start" + +- name: install python pymongo for mongo_user ansible module + pip: + name: pymongo + state: 
present + version: "{{ pymongo_version }}" + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" + tags: + - "install" + - "install:app-requirements" + +- name: add the mongodb signing key + apt_key: + url: "https://www.mongodb.org/static/pgp/server-{{ mongo_version_major_minor }}.asc" + state: present + retries: 3 + tags: + - "install" + - "install:app-requirements" + +- name: add the mongodb repo to the sources list + apt_repository: + repo: "{{ MONGODB_REPO }}" + state: present + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install mongo server and recommends + apt: + pkg: "{{ item }}" + state: present + install_recommends: yes + force: yes + update_cache: yes + register: install_mongo_package + with_items: "{{ mongodb_debian_pkgs }}" + tags: + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: create mongo dirs + file: + path: "{{ item }}" + state: directory + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + with_items: + - "{{ mongo_data_dir }}" + - "{{ mongo_dbpath }}" + - "{{ mongo_log_dir }}" + - "{{ mongo_journal_dir }}" + tags: + - "install" + - "install:app-configuration" + +# This will error when run on a new replica set, so we ignore_errors +# and connect anonymously next. 
+- name: determine if there is a replica set already + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + run_once: true + register: authed_replica_set_already_configured + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +- name: Try checking the replica set with no user/pass in case this is a new box + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + run_once: true + register: unauthed_replica_set_already_configured + when: authed_replica_set_already_configured.failed is defined + ignore_errors: true + tags: + - "manage" + - "manage:db-replication" + +# We use these in the templates but also to control a whole bunch of logic +- name: set facts that default to not initializing a replica set + set_fact: + initialize_replica_set: false + skip_replica_set: false + tags: + - "install" + - "install:app-configuration" + - "update_mongod_conf" + - "manage" + - "manage:db-replication" + +# If either auth or unauthed access comes back with a replica set, we +# do not want to initialize one. Since initialization requires a bunch +# of extra templating and restarting, it's not something we want to do on +# existing boxes. 
+- name: track if you have a replica set + set_fact: + initialize_replica_set: true + skip_replica_set: true + when: authed_replica_set_already_configured.status is not defined + and unauthed_replica_set_already_configured.status is not defined + tags: + - "manage" + - "manage:db-replication" + +- name: warn about unconfigured replica sets + debug: msg="You do not appear to have a Replica Set configured, deploying one for you" + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: copy mongodb key file + copy: + content: "{{ MONGO_CLUSTER_KEY }}" + dest: "{{ mongo_key_file }}" + mode: 0600 + owner: mongodb + group: mongodb + register: update_mongod_key + tags: + - "manage" + - "manage:db-replication" + - "mongodb_key" + +# If skip_replica_set is true, this template will not contain a replica set stanza +# because of the fact above. +- name: copy configuration template + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + register: update_mongod_conf + tags: + - "install" + - "install:app-configuration" + - "manage" + - "manage:db-replication" + - "update_mongod_conf" + +# This sets the is_backup_node var by checking whether +# mongo backups are enabled AND we're currently running against the designated mongo backup node. +# This allows backup-related tasks below to determine whether or not they should run on the current mongo node. 
+- name: determine if backup tasks should run + set_fact: + is_backup_node: true + when: MONGO_BACKUP_ENABLED and '{{ ansible_default_ipv4.address|default(ansible_all_ipv4_addresses[0]) }}' == '{{ MONGO_BACKUP_NODE }}' + tags: + - "backup:mongo" + +- name: install logrotate configuration + template: + src: mongo_logrotate.j2 + dest: /etc/logrotate.d/hourly/mongo + tags: + - "backup:mongo" + - "install" + - "install:app-configuration" + - "logrotate" + +- name: install prereqs for backup script + apt: + pkg: "{{ item }}" + state: present + update_cache: yes + with_items: + - jq + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + - "install:app-requirements" + - "mongo_packages" + +- name: install backup script + template: + src: backup-mongo.sh.j2 + dest: "{{ mongo_backup_script_path }}" + mode: 0700 + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: add mongo backup script to cron + cron: + name: mongo backup job + minute: "{{ mongo_backup_cron.minute | default('12') }}" + hour: "{{ mongo_backup_cron.hour | default('*/12') }}" + day: "{{ mongo_backup_cron.day | default('*') }}" + month: "{{ mongo_backup_cron.month | default('*') }}" + weekday: "{{ mongo_backup_cron.weekday | default('*') }}" + job: "{{ mongo_backup_script_path }} >> {{ mongo_log_dir }}/mongo-backup.log 2>&1" + become: yes + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: format mongo backup volume + filesystem: + dev: "{{ MONGO_BACKUP_EBS_VOLUME_DEVICE }}" + fstype: ext4 + force: true + ignore_errors: true + when: + - is_backup_node + tags: + - "backup:mongo" + - "install" + +- name: restart mongo service if we changed our configuration or upgraded mongo + service: + name: mongod + state: restarted + when: update_mongod_conf.changed or update_mongod_key.changed or install_mongo_package.changed + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 
27017 + delay: 2 + tags: + - "manage" + - "manage:start" + - "manage:db-replication" + +# We only try passwordless superuser creation when +# we're initializing the replica set and need to use +# the localhost exemption to create a user who will be +# able to initialize the replica set. +# We can only create the users on one machine, the one +# where we will initialize the replica set. If we +# create users on multiple hosts, then they will fail +# to come into the replica set. +- name: create super user + mongodb_user: + name: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + database: admin + roles: root + when: initialize_replica_set + run_once: true + tags: + - "manage" + - "manage:db-replication" + +# Now that the localhost exemption has been used to create the superuser, we need +# to add replica set to our configuration. This will never happen if we detected +# a replica set in the 'determine if there is a replica set already' task. +- name: Unset our skip initializing replica set fact so that mongod.conf gets a replica set + set_fact: + skip_replica_set: false + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: re-copy configuration template with replica set enabled + template: + src: mongod.conf.j2 + dest: /etc/mongod.conf + backup: yes + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: restart mongo service + service: + name: mongod + state: restarted + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: wait for mongo server to start + wait_for: + port: 27017 + delay: 2 + when: initialize_replica_set + tags: + - "manage" + - "manage:db-replication" + +- name: configure replica set + mongodb_replica_set: + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + rs_config: "{{ MONGO_RS_CONFIG }}" + run_once: true + register: replset_status + when: mongo_configure_replica_set + tags: + - "manage" + - 
"manage:db" + - "manage:db-replication" + - "manage:db-replication-configuration" + +# During initial replica set configuration, it can take a few seconds to vote +# a primary and for all members to reflect that status. During that window, +# use creation or other writes can fail. The best wait/check seems to be repeatedly +# checking the replica set status until we see a PRIMARY in the results. +- name: Wait for the replica set to update and (if needed) elect a primary + mongodb_rs_status: + host: "{{ ansible_lo['ipv4']['address'] }}" + username: "{{ MONGO_ADMIN_USER }}" + password: "{{ MONGO_ADMIN_PASSWORD }}" + register: status + until: status.status is defined and 'PRIMARY' in status.status.members|map(attribute='stateStr')|list + when: mongo_configure_replica_set + retries: 5 + delay: 2 + run_once: true + tags: + - "manage" + - "manage:db" + - "manage:db-replication" + +- name: create mongodb users in a replica set + mongodb_user: + database: "{{ item.database }}" + login_database: 'admin' + login_user: "{{ MONGO_ADMIN_USER }}" + login_password: "{{ MONGO_ADMIN_PASSWORD }}" + name: "{{ item.user }}" + password: "{{ item.password }}" + roles: "{{ item.roles }}" + state: present + replica_set: "{{ MONGO_REPL_SET }}" + with_items: "{{ MONGO_USERS }}" + run_once: true + when: mongo_configure_replica_set + tags: + - "manage" + - "manage:db" + - "manage:db-users" + - "manage:db-replication" + +- name: ensure mongo starts at boot time + service: + name: mongod + enabled: yes + tags: + - "manage" + - "manage:start" + +- name: add serverStatus logging script + template: + src: "log-mongo-serverStatus.sh.j2" + dest: "{{ COMMON_BIN_DIR }}/log-mongo-serverStatus.sh" + owner: "{{ mongo_user }}" + group: "{{ mongo_user }}" + mode: 0700 + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:app-configuration" + +- name: add serverStatus logging script to cron + cron: + name: mongostat logging job + minute: "*/3" + job: /edx/bin/log-mongo-serverStatus.sh >> {{ 
mongo_log_dir }}/serverStatus.log 2>&1 + become: yes + when: MONGO_LOG_SERVERSTATUS + tags: + - "install" + - "install:app-configuration" diff --git a/playbooks/roles/mongo_3_6/templates/log-mongo-serverStatus.sh.j2 b/playbooks/roles/mongo_3_6/templates/log-mongo-serverStatus.sh.j2 new file mode 100644 index 00000000000..04649d55ad1 --- /dev/null +++ b/playbooks/roles/mongo_3_6/templates/log-mongo-serverStatus.sh.j2 @@ -0,0 +1,3 @@ +#!/usr/bin/env bash +# Using JSON.stringify forces output of normal JSON, as opposed to Mongo's weird non-compliant extended JSON +/usr/bin/mongo -u {{ MONGO_ADMIN_USER }} --authenticationDatabase admin -p '{{ MONGO_ADMIN_PASSWORD }}' --quiet <<< 'JSON.stringify(db.serverStatus())' diff --git a/playbooks/roles/mongo_3_6/templates/mongo_logrotate.j2 b/playbooks/roles/mongo_3_6/templates/mongo_logrotate.j2 new file mode 100644 index 00000000000..f2fb4483566 --- /dev/null +++ b/playbooks/roles/mongo_3_6/templates/mongo_logrotate.j2 @@ -0,0 +1,46 @@ +{{ mongo_log_dir }}/serverStatus.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} + +{% if is_backup_node %} +{{ mongo_log_dir }}/mongo-backup.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M +} +{% endif %} + +{{ mongo_log_dir }}/mongodb.log { + create + compress + copytruncate + delaycompress + dateext + dateformat -%Y%m%d-%s + missingok + notifempty + daily + rotate 90 + size 1M + postrotate + /usr/bin/killall -USR1 mongod + endscript +} diff --git a/playbooks/roles/mongo_3_6/templates/mongod.conf.j2 b/playbooks/roles/mongo_3_6/templates/mongod.conf.j2 new file mode 100644 index 00000000000..b7d4b4a1efe --- /dev/null +++ b/playbooks/roles/mongo_3_6/templates/mongod.conf.j2 @@ -0,0 +1,46 @@ +# {{ ansible_managed }} +# mongodb.conf + + +storage: + # Where to store the data. 
+ dbPath: {{ mongo_dbpath }} + # Storage Engine + engine: {{ MONGO_STORAGE_ENGINE }} + # Enable journaling, http://www.mongodb.org/display/DOCS/Journaling + journal: +{% if mongo_enable_journal %} + enabled: true +{% else %} + enabled: false +{% endif %} +{% if MONGO_STORAGE_ENGINE_OPTIONS %} + {{ MONGO_STORAGE_ENGINE_OPTIONS | to_nice_yaml }} +{% endif %} + +systemLog: + #where to log + destination: file + path: "{{ mongo_logpath }}" +{% if mongo_logappend %} + logAppend: true +{% else %} + logAppend: false +{% endif %} + logRotate: {{ mongo_logrotate }} + +{% if not skip_replica_set %} +replication: + replSetName: {{ MONGO_REPL_SET }} + +security: + authorization: {{ MONGO_AUTH | ternary("enabled", "disabled") }} + keyFile: {{ mongo_key_file }} + +{% endif %} +net: + bindIp: {{ MONGO_BIND_IP }} + port: {{ mongo_port }} + + +{{ mongo_extra_conf }} diff --git a/playbooks/roles/mongo_client/defaults/main.yml b/playbooks/roles/mongo_client/defaults/main.yml index f48eee8b0e4..69fcf1e618e 100644 --- a/playbooks/roles/mongo_client/defaults/main.yml +++ b/playbooks/roles/mongo_client/defaults/main.yml @@ -2,8 +2,9 @@ MONGO_CLIENT_MONGODB_APT_KEY: "7F0CEB10" MONGO_CLIENT_MONGODB_APT_KEYSERVER: "keyserver.ubuntu.com" MONGO_CLIENT_MONGODB_REPO: "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" -mongo_client_version: 3.2.12 +MONGO_CLIENT_VERSION: 3.2.12 +mongo_client_version: "{{ MONGO_CLIENT_VERSION }}" mongo_client_debian_pkgs: - - "mongodb-org-shell={{ mongo_client_version }}" + - "mongodb-org-shell={{ mongo_client_version }}" - "mongodb-org-tools={{ mongo_client_version }}" diff --git a/playbooks/roles/mongo_mms/defaults/main.yml b/playbooks/roles/mongo_mms/defaults/main.yml index 80522e46a11..86c208a2b58 100644 --- a/playbooks/roles/mongo_mms/defaults/main.yml +++ b/playbooks/roles/mongo_mms/defaults/main.yml @@ -6,10 +6,10 @@ os_version: "ubuntu1604" agents: - agent: mongodb-mms-monitoring-agent - version: "6.6.2.464-1" + version: 
"7.2.0.488-1" config: "/etc/mongodb-mms/monitoring-agent.config" dir: "monitoring" - agent: mongodb-mms-backup-agent - version: "6.8.3.1002-1" + version: "7.8.1.1109-1" config: "/etc/mongodb-mms/backup-agent.config" dir: "backup" diff --git a/playbooks/roles/mount_ebs/tasks/main.yml b/playbooks/roles/mount_ebs/tasks/main.yml index 786568992dd..6b369b784a4 100644 --- a/playbooks/roles/mount_ebs/tasks/main.yml +++ b/playbooks/roles/mount_ebs/tasks/main.yml @@ -15,12 +15,21 @@ # This role ensures that the correct EBS volumes are mounted to the right locations. # If the volumes are already mounted to the correct place, this role does nothing. +# Newer AWS EC2 instances sometimes swap the order of the disks, resulting in a very small data volume and a very large +# journal volume. This prevents that by confirming that the disk sizes are correct before proceeding. Rebooting seems to +# fix the ordering +- name: Check disk size + assert: + that: + - "{{ ansible_devices[item.device.split('/')[-1]]['size'] == item.size }}" + fail_msg: "Actual size {{ ansible_devices[item.device.split('/')[-1]]['size'] }} != Expected size {{ item.size }}. 
Rebooting the instance may fix the ordering issue" + with_items: "{{ volumes }}" + # This task will be skipped if UNMOUNT_DISKS is false, causing the next task # to error if the disk has the wrong fstype but is already mounted - name: Unmount disk if fstype is wrong mount: name: "{{ (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'mount': None})).mount }}" - src: "{{ (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'device': None})).device }}" fstype: "{{ (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'fstype': None})).fstype }}" state: unmounted when: "UNMOUNT_DISKS and (ansible_mounts | selectattr('device', 'equalto', item.device) | first | default({'fstype': None})).fstype != item.fstype" @@ -46,6 +55,9 @@ force: "{{ FORCE_REFORMAT_DISKS }}" with_items: "{{ volumes }}" +- name: Regather facts to get UUIDs of freshly formatted disks + setup: "" + # This can fail if one volume is mounted on a child directory as another volume # and it attempts to unmount the parent first. This is generally fixable by rerunning. 
# Order is super dependent here, but we're iterating ansible_mounts (in order to identify @@ -56,7 +68,6 @@ - name: Unmount disks mounted to the wrong place mount: name: "{{ item.mount }}" - src: "{{ item.device }}" fstype: "{{ item.fstype }}" state: unmounted when: @@ -75,10 +86,11 @@ (volumes | selectattr('device', 'equalto', item.device) | first).mount != item.mount with_items: "{{ ansible_mounts }}" +# Use UUID to prevent issues with AWS EC2 swapping device order - name: Mount disks mount: name: "{{ item.mount }}" - src: "{{ item.device }}" + src: "UUID={{ ansible_devices[item.device.split('/')[-1]]['links']['uuids'][0] }}" state: mounted fstype: "{{ item.fstype }}" opts: "{{ item.options }}" diff --git a/playbooks/roles/mysql/defaults/main.yml b/playbooks/roles/mysql/defaults/main.yml index ec93e5960c1..b7949eaa4ae 100644 --- a/playbooks/roles/mysql/defaults/main.yml +++ b/playbooks/roles/mysql/defaults/main.yml @@ -7,3 +7,8 @@ mysql_debian_pkgs: MYSQL_BIND_IP: "127.0.0.1" MYSQL_REMOTE_ROOT_PASSWORD: !!null + +mysql_dir: /etc/mysql + +DEFAULT_MYSQL_CHARACTER_SET: utf8 +DEFAULT_MYSQL_COLLATION: utf8_general_ci diff --git a/playbooks/roles/mysql/tasks/mysql.yml b/playbooks/roles/mysql/tasks/mysql.yml index 9741141f44e..12b1e81174e 100644 --- a/playbooks/roles/mysql/tasks/mysql.yml +++ b/playbooks/roles/mysql/tasks/mysql.yml @@ -35,10 +35,9 @@ - name: Install mysql-5.6 and dependencies apt: - name: "{{ item }}" + name: "{{ mysql_debian_pkgs }}" install_recommends: yes state: present - with_items: "{{ mysql_debian_pkgs }}" - name: Set bind IP ini_file: @@ -51,6 +50,14 @@ - {section: "mysqld", option: "bind-address", value: "{{ MYSQL_BIND_IP }}"} register: mysql_conf_updated +- name: Set default character sets and collations + template: + src: default_character_sets_and_collations.cnf.j2 + dest: "{{ mysql_dir }}/mysql.conf.d/default_character_sets_and_collations.cnf" + owner: root + group: root + mode: 0644 + - name: Restart mysql service: name: mysql diff --git 
a/playbooks/roles/mysql/templates/default_character_sets_and_collations.cnf.j2 b/playbooks/roles/mysql/templates/default_character_sets_and_collations.cnf.j2 new file mode 100644 index 00000000000..14f5556f4f7 --- /dev/null +++ b/playbooks/roles/mysql/templates/default_character_sets_and_collations.cnf.j2 @@ -0,0 +1,17 @@ + +# {{ ansible_managed }} + +# This does not change any existing databases or rows, only the defaults for newly created databases + +[client] +default-character-set={{ DEFAULT_MYSQL_CHARACTER_SET }} + +[mysql] +default-character-set={{ DEFAULT_MYSQL_CHARACTER_SET }} + + +[mysqld] +collation-server = {{ DEFAULT_MYSQL_COLLATION }} +init-connect='SET NAMES {{ DEFAULT_MYSQL_CHARACTER_SET }}' +character-set-server = {{ DEFAULT_MYSQL_CHARACTER_SET }} + diff --git a/playbooks/roles/neo4j/defaults/main.yml b/playbooks/roles/neo4j/defaults/main.yml index dfd2de6af59..065023831cc 100644 --- a/playbooks/roles/neo4j/defaults/main.yml +++ b/playbooks/roles/neo4j/defaults/main.yml @@ -22,7 +22,8 @@ NEO4J_AUTH_ENABLED: "true" neo4j_gpg_key_url: https://debian.neo4j.org/neotechnology.gpg.key neo4j_apt_repository: "deb http://debian.neo4j.org/repo stable/" neo4j_defaults_file: "/etc/default/neo4j" -neo4j_version: "3.2.2" +NEO4J_VERSION: "3.2.2" +neo4j_version: "{{ NEO4J_VERSION }}" neo4j_server_config_file: "/etc/neo4j/neo4j.conf" neo4j_https_port: 7473 # default in package is 7473 neo4j_http_port: 7474 # default in package is 7474 diff --git a/playbooks/roles/newrelic_infrastructure/defaults/main.yml b/playbooks/roles/newrelic_infrastructure/defaults/main.yml index b27f62a0200..936fd66d2e7 100644 --- a/playbooks/roles/newrelic_infrastructure/defaults/main.yml +++ b/playbooks/roles/newrelic_infrastructure/defaults/main.yml @@ -17,13 +17,13 @@ NEWRELIC_INFRASTRUCTURE_LICENSE_KEY: "SPECIFY_KEY_HERE" NEWRELIC_INFRASTRUCTURE_DEBIAN_REPO: 'deb https://download.newrelic.com/infrastructure_agent/linux/apt {{ ansible_distribution_release }} main' 
-NEWRELIC_INFRASTRUCTURE_DEBIAN_KEY_URL: 'https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg' +NEWRELIC_INFRASTRUCTURE_DEBIAN_REPO_XENIAL: 'deb https://download.newrelic.com/infrastructure_agent/linux/apt xenial main' +NEWRELIC_INFRASTRUCTURE_KEY_URL: 'https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg' # Any extra config you want to specify # https://docs.newrelic.com/docs/infrastructure/new-relic-infrastructure/configuration/infrastructure-config-file-template-newrelic-infrayml NEWRELIC_INFRASTRUCTURE_EXTRA_CONFIG: '' -NEWRELIC_INFRASTRUCTURE_AMAZON_REPO: 'https://download.newrelic.com/infrastructure_agent/linux/yum/el/6/x86_64/newrelic-infra.repo' - +NEWRELIC_INFRASTRUCTURE_AMAZON_REPO: 'https://download.newrelic.com/infrastructure_agent/linux/yum/el/6/x86_64' # # OS packages # diff --git a/playbooks/roles/newrelic_infrastructure/tasks/main.yml b/playbooks/roles/newrelic_infrastructure/tasks/main.yml index 9b78b25dd0d..b766e2ba879 100644 --- a/playbooks/roles/newrelic_infrastructure/tasks/main.yml +++ b/playbooks/roles/newrelic_infrastructure/tasks/main.yml @@ -33,14 +33,15 @@ - name: Add apt key for New Relic Infrastructure apt_key: - url: "{{ NEWRELIC_INFRASTRUCTURE_DEBIAN_KEY_URL }}" + url: "{{ NEWRELIC_INFRASTRUCTURE_KEY_URL }}" state: present tags: - install - install:app-requirements when: ansible_distribution == 'Ubuntu' -- name: Install apt repository for New Relic Infrastructure +# For bionic, use the xenial repo for now. +- name: Install apt repository for New Relic Infrastructure if not bionic apt_repository: repo: "{{ NEWRELIC_INFRASTRUCTURE_DEBIAN_REPO }}" state: present @@ -48,23 +49,37 @@ tags: - install - install:app-requirements - when: ansible_distribution == 'Ubuntu' + when: ansible_distribution == 'Ubuntu' and ansible_distribution_release != 'bionic' + +# For bionic, use the xenial repo for now. 
+- name: Install apt repository for New Relic Infrastructure if bionic + apt_repository: + repo: "{{ NEWRELIC_INFRASTRUCTURE_DEBIAN_REPO_XENIAL }}" + state: present + update_cache: yes + tags: + - install + - install:app-requirements + when: ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'bionic' - name: Install newrelic related system packages for Ubuntu apt: - name: "{{ item }}" + name: "{{ newrelic_infrastructure_debian_pkgs }}" install_recommends: yes state: latest tags: - install - install:app-requirements - with_items: "{{ newrelic_infrastructure_debian_pkgs }}" when: ansible_distribution == 'Ubuntu' - name: Configure the New Relic Servers yum repository - yum: - name: "{{ NEWRELIC_AMAZON_REPO }}" + yum_repository: + name: "newrelic-infra" + baseurl: "{{ NEWRELIC_INFRASTRUCTURE_AMAZON_REPO }}" + gpgkey: "{{ NEWRELIC_INFRASTRUCTURE_KEY_URL }}" + gpgcheck: "yes" state: present + description: New Relic Infrastructure tags: - install - install:app-requirements @@ -72,10 +87,10 @@ - name: Install newrelic related system packages for Amazon yum: - name: "{{ item }}" + name: "{{ newrelic_infrastructure_redhat_pkgs }}" + enablerepo: "newrelic-infra" state: latest update_cache: yes - with_items: "{{ newrelic_infrastructure_redhat_pkgs }}" tags: - install - install:app-requirements @@ -89,12 +104,16 @@ group: root mode: u=rwx,g=r,o=r +# In Ubuntu>16.04, /etc/rc.local doesn't exist by default. Since this script isn't +# used by the GoCD instance using Ubuntu 18.04, skip this task when running on Bionic. 
+# See OPS-3341 and http://manpages.ubuntu.com/manpages/bionic/man8/systemd-rc-local-generator.8.html - name: Run newrelic display name script on boot lineinfile: dest: "/etc/rc.local" line: "/edx/bin/write_nr_display_name_config.sh" insertbefore: "exit 0" mode: "u+x,g+x" + when: ansible_distribution_release != 'bionic' - name: Restart the infrastructure agent if the license key changes service: diff --git a/playbooks/roles/nginx/defaults/main.yml b/playbooks/roles/nginx/defaults/main.yml index ca7e4e61fe2..9cf06b13091 100644 --- a/playbooks/roles/nginx/defaults/main.yml +++ b/playbooks/roles/nginx/defaults/main.yml @@ -29,7 +29,11 @@ NGINX_CMS_CUSTOM_DOMAINS: [] NGINX_EDXAPP_EXTRA_SITES: [] NGINX_EDXAPP_EXTRA_CONFIGS: [] -NGINX_EDXAPP_CUSTOM_REDIRECTS: {} + +# Override these vars to alter the memory allocated to map_hash +NGINX_OVERRIDE_DEFAULT_MAP_HASH_SIZE: False +NGINX_MAP_HASH_MAX_SIZE: 2048 +NGINX_MAP_HASH_BUCKET_SIZE: 64 # Override these vars for adding user to nginx.htpasswd NGINX_USERS: @@ -37,8 +41,14 @@ NGINX_USERS: password: "{{ COMMON_HTPASSWD_PASS }}" state: present -NGINX_HEALTH_CHECK_ENABLED: False -NGINX_HEALTH_CHECK_USER_AGENT: "" +# Override these vars for adding user to nginx.htpasswd +# for prospectus preview basic auth +PROSPECTUS_PREVIEW_HTPASSWD_USER: "{{ COMMON_HTPASSWD_USER }}" +PROSPECTUS_PREVIEW_HTPASSWD_PASS: "{{ COMMON_HTPASSWD_PASS }}" +PROSPECTUS_PREVIEW_NGINX_USERS: + - name: "{{ PROSPECTUS_PREVIEW_HTPASSWD_USER }}" + password: "{{ PROSPECTUS_PREVIEW_HTPASSWD_PASS }}" + state: present NGINX_ENABLE_SSL: False NGINX_REDIRECT_TO_HTTPS: False @@ -58,8 +68,8 @@ NGINX_HTTPS_REDIRECT_STRATEGY: "scheme" NGINX_SSL_CERTIFICATE: 'ssl-cert-snakeoil.pem' NGINX_SSL_KEY: 'ssl-cert-snakeoil.key' -NGINX_SSL_CIPHERS: 
"'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'" -NGINX_SSL_PROTOCOLS: "TLSv1.3 TLSv1.2" +NGINX_SSL_CIPHERS: "'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA'" +NGINX_SSL_PROTOCOLS: "TLSv1.1 TLSv1.2" NGINX_DH_PARAMS_PATH: "/etc/ssl/private/dhparams.pem" NGINX_DH_KEYSIZE: 2048 @@ -67,6 +77,10 @@ NGINX_DH_KEYSIZE: 2048 NGINX_SSL_CERTIFICATE_PATH: '/etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}' NGINX_SSL_KEY_PATH: '/etc/ssl/private/{{ NGINX_SSL_KEY|basename }}' +# This can be one of 'p_combined' or 'ssl_combined' by default. If you +# wish to specify your own format then define it in a configuration file +# located under `nginx_conf_dir` and then use the format name specified +# in your configuration file. NGINX_LOG_FORMAT_NAME: 'p_combined' # When set to False, nginx will pass X-Forwarded-For, X-Forwarded-Port, # and X-Forwarded-Proto headers through to the backend unmodified. 
@@ -91,6 +105,7 @@ NGINX_SERVER_ERROR_STYLE_H1: 'font-family: "Helvetica Neue",Helvetica,Roboto,Ari NGINX_SERVER_ERROR_STYLE_P_H2: 'font-family: "Helvetica Neue",Helvetica,Roboto,Arial,sans-serif; margin-bottom: .3em; line-height: 1.25em; text-rendering: optimizeLegibility; font-weight: bold; font-size: 1.8em; color: #5b5e63;' NGINX_SERVER_ERROR_STYLE_P: 'font-family: Georgia,Cambria,"Times New Roman",Times,serif; margin: auto; margin-bottom: 1em; font-weight: 200; line-height: 1.4em; font-size: 1.1em; max-width: 80%;' NGINX_SERVER_ERROR_STYLE_DIV: 'margin: auto; width: 800px; text-align: center; padding:20px 0px 0px 0px;' +NGINX_SERVER_HTML_FILES_TEMPLATE: "edx/var/nginx/server-static/server-template.j2" NGINX_SERVER_HTML_FILES: - file: rate-limit.html lang: "{{ NGINX_SERVER_ERROR_LANG }}" @@ -106,7 +121,7 @@ NGINX_SERVER_HTML_FILES: - file: server-error.html lang: "{{ NGINX_SERVER_ERROR_LANG }}" title: 'Server error' - msg: 'A maintenance operation is currently in progress on our system that will affect your access to our online courses.

If you have any questions or concerns, please contact {{ EDXAPP_TECH_SUPPORT_EMAIL|default("technical@example.com") }}' + msg: 'We are aware of the error and are working to find a resolution.' img: "{{ NGINX_SERVER_ERROR_IMG }}" img_alt: "{{ NGINX_SERVER_ERROR_IMG_ALT }}" heading: 'Uh oh, we are having some server issues..' @@ -127,6 +142,9 @@ NGINX_SERVER_HTML_FILES: style_div: "{{ NGINX_SERVER_ERROR_STYLE_DIV }}" + +NGINX_SERVER_STATIC_FILES: [] + NGINX_APT_REPO: deb http://nginx.org/packages/ubuntu/ {{ ansible_distribution_release }} nginx @@ -153,6 +171,10 @@ NGINX_EDXAPP_ERROR_PAGES: "504": "{{ nginx_default_error_page }}" "503": "{{ nginx_maintenance_page }}" +NGINX_EDXAPP_PROXY_INTERCEPT_ERRORS: false + +NGINX_EDXAPP_FAVICON_PATH: "/static/{{ NGINX_EDXAPP_DEFAULT_SITE_THEME }}images/favicon.ico" + CMS_HOSTNAME: '~^((stage|prod)-)?studio.*' NOTES_HOSTNAME: '~^((stage|prod)-)?notes.*' @@ -196,6 +218,7 @@ EDXAPP_CMS_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" EDXAPP_LMS_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" EDXAPP_LMS_PREVIEW_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" KIBANA_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" +PROSPECTUS_PREVIEW_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" XQUEUE_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" XSERVER_ENABLE_BASIC_AUTH: "{{ COMMON_ENABLE_BASIC_AUTH }}" @@ -239,4 +262,14 @@ NGINX_EDXAPP_CMS_APP_EXTRA: "" # Extra settings to add to site configuration for LMS NGINX_EDXAPP_LMS_APP_EXTRA: "" -NGINX_DJANGO_ADMIN_ACCESS_CIDRS: [] +# If comprehensive theme enabled, write down the name of +# the theme as in EDXAPP_DEFAULT_SITE_THEME ending with / +# to allow to override favicon properly. 
+# Example: "your-site-theme/" +NGINX_EDXAPP_DEFAULT_SITE_THEME: "" + +# List of subnet or IP addressess to allow to access admin endpoints +NGINX_ADMIN_ACCESS_CIDRS: [] + +# Set trusted network subnets or IP addresses to send correct replacement addresses +NGINX_TRUSTED_IP_CIDRS: "0.0.0.0/0" diff --git a/playbooks/roles/nginx/tasks/main.yml b/playbooks/roles/nginx/tasks/main.yml index 46470d33d08..9c987eb7ae1 100644 --- a/playbooks/roles/nginx/tasks/main.yml +++ b/playbooks/roles/nginx/tasks/main.yml @@ -47,10 +47,9 @@ - name: Install needed packages apt: - name: "{{ item }}" + name: "{{ nginx_debian_pkgs }}" state: present notify: restart nginx - with_items: "{{ nginx_debian_pkgs }}" tags: - install - install:system-requirements @@ -208,6 +207,9 @@ owner: root group: "{{ common_web_user }}" mode: "0640" + # Ansible 2.4 changed follow symlinks default to "no". We need this for edx-east symlink + # https://docs.ansible.com/ansible/latest/modules/template_module.html + follow: yes with_items: "{{ nginx_extra_configs }}" notify: reload nginx tags: @@ -305,7 +307,7 @@ - name: Create NGINX server templates template: - src: "edx/var/nginx/server-static/server-template.j2" + src: "{{ NGINX_SERVER_HTML_FILES_TEMPLATE }}" dest: "{{ nginx_server_static_dir }}/{{ item.file }}" owner: root group: "{{ common_web_user }}" @@ -316,7 +318,18 @@ - install:configuration - nginx:maintenance -# appsembler mod ..when ALLOW_BASIC_AUTH +- name: Copy static files + copy: + src: "{{ item }}" + dest: "{{ nginx_server_static_dir }}" + owner: "{{ common_web_user }}" + group: "{{ common_web_user }}" + mode: "0640" + with_items: "{{ NGINX_SERVER_STATIC_FILES }}" + tags: + - install + - install:configuration + - name: Write out htpasswd file htpasswd: name: "{{ item.name }}" @@ -329,6 +342,18 @@ - install - install:configuration +- name: Write out htpasswd file for prospectus preview pages + htpasswd: + name: "{{ item.name }}" + password: "{{ item.password }}" + state: "{{ item.state }}" + 
path: "{{ nginx_htpasswd_file }}" + with_items: "{{ PROSPECTUS_PREVIEW_NGINX_USERS }}" + when: PROSPECTUS_PREVIEW_ENABLE_BASIC_AUTH + tags: + - install + - install:configuration + - name: Create nginx log file location (just in case) file: path: "{{ nginx_log_dir}}" @@ -446,6 +471,14 @@ - install - install:configuration +# Test the nginx configs before restarting nginx so that any errors are visible and not hidden in +# the service logs. +- name: Test nginx configs + command: nginx -t + tags: + - install + - install:configuration + # nginx is started during package installation, before any of the configuration files are in place. # The handler that reloads the configuration would be run only at the very end of the playbook, so # none of the local services would be available in the meantime, e.g. causing certs to error out diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/robots.txt.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/robots.txt.j2 index 50b1e41d5e2..d25fbe499b0 100644 --- a/playbooks/roles/nginx/templates/edx/app/nginx/robots.txt.j2 +++ b/playbooks/roles/nginx/templates/edx/app/nginx/robots.txt.j2 @@ -1,5 +1,28 @@ {% for item in NGINX_ROBOT_RULES %} +{% if item.agent is string %} User-agent: {{ item.agent }} +{% else %} +{% for agent in item.agent %} +User-agent: {{ agent }} +{% endfor %} +{% endif %} +{% if item.allow is defined %} +{% if item.allow is string %} +Allow: {{ item.allow }} +{% else %} +{% for allow in item.allow %} +Allow: {{ allow }} +{% endfor %} +{% endif %} +{% endif %} +{% if item.disallow is defined %} +{% if item.disallow is string %} Disallow: {{ item.disallow }} +{% else %} +{% for disallow in item.disallow %} +Disallow: {{ disallow }} +{% endfor %} +{% endif %} +{% endif %} {% endfor %} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/analytics_api.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/analytics_api.j2 new file mode 100644 index 00000000000..7900e0a22ed --- 
/dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/analytics_api.j2 @@ -0,0 +1,61 @@ +upstream analytics_api_app_server { + {% for host in nginx_analytics_api_gunicorn_hosts %} + server {{ host }}:{{ analytics_api_gunicorn_port }} fail_timeout=0; + {% endfor %} +} + +server { + listen {{ ANALYTICS_API_NGINX_PORT }} default_server; + + # Nginx does not support nested condition or or conditions so + # there is an unfortunate mix of conditonals here. + {% if NGINX_REDIRECT_TO_HTTPS %} + {% if NGINX_HTTPS_REDIRECT_STRATEGY == "scheme" %} + # Redirect http to https over single instance + if ($scheme != "https") + { + set $do_redirect_to_https "true"; + } + + {% elif NGINX_HTTPS_REDIRECT_STRATEGY == "forward_for_proto" %} + + # Forward to HTTPS if we're an HTTP request... and the server is behind ELB + if ($http_x_forwarded_proto = "http") + { + set $do_redirect_to_https "true"; + } + {% endif %} + + # Execute the actual redirect + if ($do_redirect_to_https = "true") + { + return 301 https://$host$request_uri; + } + {% endif %} + + location ~ ^/static/(?P.*) { + root {{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }}; + try_files /staticfiles/$file =404; + } + + location / { + try_files $uri @proxy_to_app; + } + + {% include "robots.j2" %} + +location @proxy_to_app { + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header X-Forwarded-Port $http_x_forwarded_port; + proxy_set_header X-Forwarded-For $http_x_forwarded_for; + + # newrelic-specific header records the time when nginx handles a request. 
+ proxy_set_header X-Queue-Start "t=${msec}"; + + proxy_set_header Host $http_host; + + proxy_redirect off; + proxy_pass http://analytics_api_app_server; + } +} + diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms.j2 index 1bd667c53f5..65b9bc23765 100644 --- a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms.j2 +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms.j2 @@ -10,6 +10,22 @@ upstream cms-backend { {% endfor %} } +{% if EDXAPP_CORS_ORIGIN_WHITELIST|length > 0 %} + # The Origin request header indicates where a fetch originates from. It doesn't include any path information, + # but only the server name (e.g. https://www.example.com). + # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin for details. + # + # Here we set the value that is included in the Access-Control-Allow-Origin response header. If the origin is one + # of our known hosts--served via HTTP or HTTPS--we allow for CORS. Otherwise, we set the "null" value, disallowing CORS. 
+ map $http_origin $cors_origin { + default "null"; + {% for host in EDXAPP_CORS_ORIGIN_WHITELIST %} + "~*^https?:\/\/{{ host|replace('.', '\.') }}$" $http_origin; + {% endfor %} + } +{% endif %} + + server { # CMS configuration file for nginx, templated by ansible @@ -26,10 +42,16 @@ server { {% endif %} # error pages - {% for k, v in NGINX_EDXAPP_ERROR_PAGES.iteritems() %} + {% for k, v in NGINX_EDXAPP_ERROR_PAGES.items() %} error_page {{ k }} {{ v }}; {% endfor %} + {% if NGINX_EDXAPP_PROXY_INTERCEPT_ERRORS %} + proxy_intercept_errors on; + {% endif %} + +{% include "empty_json.j2" %} + listen {{ EDXAPP_CMS_NGINX_PORT }} {{ default_site }}; {% if NGINX_ENABLE_SSL %} @@ -42,9 +64,12 @@ error_page {{ k }} {{ v }}; {% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %} # request the browser to use SSL for all connections - add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}"; + add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}; includeSubDomains"; {% endif %} + # prevent the browser from doing MIME-type sniffing + add_header X-Content-Type-Options nosniff; + # Prevent invalid display courseware in IE 10+ with high privacy settings add_header P3P '{{ NGINX_P3P_MESSAGE }}'; @@ -62,45 +87,21 @@ error_page {{ k }} {{ v }}; client_max_body_size {{ nginx_cms_client_max_body_size }}; proxy_read_timeout {{ nginx_cms_proxy_read_timeout }}; - rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last; + rewrite ^(.*)/favicon.ico$ {{ NGINX_EDXAPP_FAVICON_PATH }} last; {% include "python_lib.zip.j2" %} {% include "common-settings.j2" %} location @proxy_to_cms_app { - {% if NGINX_EDXAPP_ENABLE_LOCAL_MAINTENANCE %} - return 503; - {% endif %} - - - {% if NGINX_SET_X_FORWARDED_HEADERS %} - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $remote_addr; - {% else %} - proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; - proxy_set_header X-Forwarded-Port 
$http_x_forwarded_port; - proxy_set_header X-Forwarded-For $http_x_forwarded_for; - {% endif %} - - # newrelic-specific header records the time when nginx handles a request. - proxy_set_header X-Queue-Start "t=${msec}"; - - proxy_set_header Host $http_host; +{% include "cms_proxy.j2" %} + } - proxy_redirect off; - proxy_pass http://cms-backend; - {% if NGINX_CMS_PROXY_CONNECT_TIMEOUT %} - proxy_connect_timeout {{ NGINX_CMS_PROXY_CONNECT_TIMEOUT }}; - {% endif %} - {% if NGINX_CMS_PROXY_SEND_TIMEOUT %} - proxy_send_timeout {{ NGINX_CMS_PROXY_SEND_TIMEOUT }}; - {% endif %} - {% if NGINX_CMS_PROXY_READ_TIMEOUT %} - proxy_read_timeout {{ NGINX_CMS_PROXY_READ_TIMEOUT }}; - {% endif %} + location @proxy_to_cms_app_api { + error_page 504 @empty_json; + error_page 502 @empty_json; + error_page 500 @empty_json; - {{ NGINX_EDXAPP_CMS_APP_EXTRA }} +{% include "cms_proxy.j2" %} } location / { @@ -118,6 +119,12 @@ error_page {{ k }} {{ v }}; # No basic auth security on the heartbeat url, so that ELB can use it location /heartbeat { + # If /edx/var/nginx/server-static/maintenance_heartbeat.txt exists serve an + # empty 200 so the instance stays in the load balancer to serve the + # maintenance page + if (-f /edx/var/nginx/server-static/maintenance_heartbeat.txt) { + return 200; + } try_files $uri @proxy_to_cms_app; } @@ -125,16 +132,18 @@ error_page {{ k }} {{ v }}; # uses the authorization header so we can't have # basic auth on it as well. 
location /api { - try_files $uri @proxy_to_cms_app; + try_files $uri @proxy_to_cms_app_api; } -{% if NGINX_DJANGO_ADMIN_ACCESS_CIDRS and EDXAPP_ENABLE_DJANGO_ADMIN_RESTRICTION %} +{% if NGINX_ADMIN_ACCESS_CIDRS and EDXAPP_ENABLE_DJANGO_ADMIN_RESTRICTION %} location /admin { - {% for cidr in NGINX_DJANGO_ADMIN_ACCESS_CIDRS %} + real_ip_header X-Forwarded-For; + set_real_ip_from {{ NGINX_TRUSTED_IP_CIDRS }}; + {% for cidr in NGINX_ADMIN_ACCESS_CIDRS %} allow {{ cidr }}; {% endfor %} deny all; - try_files $uri @proxy_to_lms_app; + try_files $uri @proxy_to_cms_app; } {% endif %} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms_proxy.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms_proxy.j2 new file mode 100644 index 00000000000..899726d38b8 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms_proxy.j2 @@ -0,0 +1,28 @@ +{% if NGINX_SET_X_FORWARDED_HEADERS %} + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $remote_addr; +{% else %} + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header X-Forwarded-Port $http_x_forwarded_port; + proxy_set_header X-Forwarded-For $http_x_forwarded_for; +{% endif %} + + # newrelic-specific header records the time when nginx handles a request. 
+ proxy_set_header X-Queue-Start "t=${msec}"; + + proxy_set_header Host $http_host; + + proxy_redirect off; + proxy_pass http://cms-backend; +{% if NGINX_CMS_PROXY_CONNECT_TIMEOUT %} + proxy_connect_timeout {{ NGINX_CMS_PROXY_CONNECT_TIMEOUT }}; +{% endif %} +{% if NGINX_CMS_PROXY_SEND_TIMEOUT %} + proxy_send_timeout {{ NGINX_CMS_PROXY_SEND_TIMEOUT }}; +{% endif %} +{% if NGINX_CMS_PROXY_READ_TIMEOUT %} + proxy_read_timeout {{ NGINX_CMS_PROXY_READ_TIMEOUT }}; +{% endif %} + +{{ NGINX_EDXAPP_CMS_APP_EXTRA }} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/conductor.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/conductor.j2 new file mode 100644 index 00000000000..7a6bd391244 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/conductor.j2 @@ -0,0 +1,65 @@ +{%- if "conductor" in nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} + +server { + # Conductor configuration file for nginx, templated by ansible + + {% if NGINX_CONDUCTOR_PROXY_INTERCEPT_ERRORS %} + proxy_intercept_errors on; + {% endif %} + + # Catches 404s from S3 and returns the default nginx 404 page instead + error_page 404 @error404; + + location @error404 { + return 404; + } + + {% include "handle-tls-redirect-and-ip-disclosure.j2" %} + + {% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %} + # request the browser to use SSL for all connections + add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}"; + {% endif %} + + listen {{ CONDUCTOR_NGINX_PORT }} {{ default_site }}; + + # Redirects using the client port instead of the port the service is running + # on. This prevents redirects to the local 8000 port. 
+ port_in_redirect off; + + {% if CONDUCTOR_REDIRECT_ROOT %} + location = / { + return 302 {{ CONDUCTOR_ROOT_REDIRECT_PATH }}; + } + {% endif %} + + # CONDUCTOR_STATIC_SITES will be a list of dictionaries which have a: + # - router_path: The path you will go to on the router to access the content + # - proxied_path: The path to proxy the requests to + {% for static_site in CONDUCTOR_STATIC_SITES %} + + # Matches: // + location = /{{ static_site.router_path }}/ { + proxy_pass {{ static_site.proxied_path }}/index.html; + } + + # Matches: //[.../] + location ~ ^/{{ static_site.router_path }}/((?:\w+\/+)*)([\w\-\.]+\.[\w\-\.]+) { + proxy_pass {{ static_site.proxied_path }}/$1$2; + } + + # Matches: ///[.../] + location ~ ^/{{ static_site.router_path }}/([a-z0-9-]+)[/]? { + proxy_pass {{ static_site.proxied_path }}/$1/index.html; + } + + {% endfor %} + + location /HealthCheck { + return 200; + } +} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/edx_notes_api.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/edx_notes_api.j2 index a9afa8f28b4..24141c0fba4 100644 --- a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/edx_notes_api.j2 +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/edx_notes_api.j2 @@ -1,4 +1,9 @@ {% if INSTALL_EDX_NOTES is defined and INSTALL_EDX_NOTES %} +{%- if "edx_notes_api" in nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} upstream {{ edx_notes_api_service_name }}_app_server { {% for host in nginx_edx_notes_api_gunicorn_hosts %} @@ -7,8 +12,7 @@ upstream {{ edx_notes_api_service_name }}_app_server { } server { - listen {{ edx_notes_api_nginx_port }}; - server_name {{ NOTES_HOSTNAME }}; + listen {{ edx_notes_api_nginx_port }} {{ default_site }}; {% if NGINX_ENABLE_SSL %} @@ -20,7 +24,7 @@ server { {% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %} # request the browser to use SSL for all 
connections - add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}"; + add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}; includeSubDomains"; {% endif %} {% include "common-settings.j2" %} @@ -29,7 +33,9 @@ server { add_header P3P '{{ NGINX_P3P_MESSAGE }}'; {% include "handle-tls-redirect-and-ip-disclosure.j2" %} - + + server_name {{ EDX_NOTES_API_HOSTNAME }}; + location / { try_files $uri @proxy_to_app; } diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/empty_json.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/empty_json.j2 new file mode 100644 index 00000000000..572876d76a1 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/empty_json.j2 @@ -0,0 +1,9 @@ + location @empty_json { + # This location will return an empty body with content-type application/json + # If this location is referenced by the error_page directive the + # response code will be the error response code (i.e. 
502), not 200 + # despite the "return 200" directive + default_type application/json; + return 200; + } + diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/insights.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/insights.j2 index c6e5d88bb37..4f642edc91b 100644 --- a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/insights.j2 +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/insights.j2 @@ -38,7 +38,7 @@ server { {% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %} # request the browser to use SSL for all connections - add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}"; + add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}; includeSubDomains"; {% endif %} location ~ ^/static/(?P.*) { diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/jenkins.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/jenkins.j2 index d346b18fad5..d783c034c70 100644 --- a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/jenkins.j2 +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/jenkins.j2 @@ -17,8 +17,8 @@ server { proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_connect_timeout 150; - proxy_send_timeout 100; - proxy_read_timeout 100; + proxy_send_timeout 300; + proxy_read_timeout 300; proxy_buffers 4 32k; client_max_body_size 16m; client_body_buffer_size 128k; diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/learner_portal.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/learner_portal.j2 new file mode 100644 index 00000000000..41bbd35afbb --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/learner_portal.j2 @@ -0,0 +1,26 @@ +{%- if "learner_portal" in nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- 
endif -%} + +server { + listen {{ LEARNER_PORTAL_NGINX_PORT }} {{ default_site }}; + server_name ~^((stage|prod)-)?learner-portal.*; + location / { + root /edx/app/learner_portal/learner_portal/dist; + index index.html; + } +} + +server { + listen {{ LEARNER_PORTAL_SSL_NGINX_PORT }} ssl; + server_name ~^((stage|prod)-)?learner-portal.*; + ssl_certificate /etc/ssl/certs/wildcard.sandbox.edx.org.pem; + ssl_certificate_key /etc/ssl/private/wildcard.sandbox.edx.org.key; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; + location / { + root /edx/app/learner_portal/learner_portal/dist; + index index.html; + } +} \ No newline at end of file diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms.j2 index 1cae6ab0160..2647e3a9b97 100644 --- a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms.j2 +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms.j2 @@ -88,10 +88,16 @@ server { {% endif %} # error pages - {% for k, v in NGINX_EDXAPP_ERROR_PAGES.iteritems() %} + {% for k, v in NGINX_EDXAPP_ERROR_PAGES.items() %} error_page {{ k }} {{ v }}; {% endfor %} + {% if NGINX_EDXAPP_PROXY_INTERCEPT_ERRORS %} + proxy_intercept_errors on; + {% endif %} + +{% include "empty_json.j2" %} + listen {{ EDXAPP_LMS_NGINX_PORT }} {{ default_site }}; {% if NGINX_ENABLE_SSL %} @@ -103,9 +109,12 @@ error_page {{ k }} {{ v }}; {% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %} # request the browser to use SSL for all connections - add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}"; + add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}; includeSubDomains"; {% endif %} + # prevent the browser from doing MIME-type sniffing + add_header X-Content-Type-Options nosniff; + # Prevent invalid display courseware in IE 10+ with high privacy settings add_header P3P '{{ NGINX_P3P_MESSAGE }}'; @@ -117,18 +126,18 @@ 
error_page {{ k }} {{ v }}; {% include "handle-tls-redirect-and-ip-disclosure.j2" %} - + access_log {{ nginx_log_dir }}/access.log {{ NGINX_LOG_FORMAT_NAME }}; access_log {{ nginx_log_dir }}/access_lms_json.log json_combined; error_log {{ nginx_log_dir }}/error.log error; - # CS184 requires uploads of up to 4MB for submitting screenshots. - # CMS requires larger value for course assest, values provided + # Some Master's courses require submissions up to 20MB in size. + # CMS requires larger value for course assets, values provided # via hiera. client_max_body_size {{ nginx_lms_client_max_body_size }}; proxy_read_timeout {{ nginx_lms_proxy_read_timeout }}; - rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last; + rewrite ^(.*)/favicon.ico$ {{ NGINX_EDXAPP_FAVICON_PATH }} last; {% include "python_lib.zip.j2" %} {% include "common-settings.j2" %} @@ -149,30 +158,16 @@ error_page {{ k }} {{ v }}; {% endif -%} location @proxy_to_lms_app { +{% include "lms_proxy.j2" %} - {% if NGINX_EDXAPP_ENABLE_LOCAL_MAINTENANCE %} - return 503; - {% endif %} - - {% if NGINX_SET_X_FORWARDED_HEADERS %} - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $remote_addr; - {% else %} - proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; - proxy_set_header X-Forwarded-Port $http_x_forwarded_port; - proxy_set_header X-Forwarded-For $http_x_forwarded_for; - {% endif %} - - # newrelic-specific header records the time when nginx handles a request. - proxy_set_header X-Queue-Start "t=${msec}"; - - proxy_set_header Host $http_host; + } - proxy_redirect off; - proxy_pass http://lms-backend; + location @proxy_to_lms_app_api { + error_page 504 @empty_json; + error_page 502 @empty_json; + error_page 500 @empty_json; - {{ NGINX_EDXAPP_LMS_APP_EXTRA }} +{% include "lms_proxy.j2" %} } location / { @@ -187,10 +182,6 @@ error_page {{ k }} {{ v }}; # determine whether a user on their site is logged into edX. 
# The most common image to use is favicon.ico. location /login { - {% if EDXAPP_LMS_ENABLE_BASIC_AUTH|bool %} - {% include "basic-auth.j2" %} - {% endif %} - if ( $arg_next ~* "favicon.ico" ) { access_log off; return 403; @@ -214,31 +205,35 @@ error_page {{ k }} {{ v }}; # uses the authorization header so we can't have # basic auth on it as well. location /api { - try_files $uri @proxy_to_lms_app; + try_files $uri @proxy_to_lms_app_api; + } + + location /login_refresh { + try_files $uri @proxy_to_lms_app_api; } #enterprise API location /enterprise/api { - try_files $uri @proxy_to_lms_app; + try_files $uri @proxy_to_lms_app_api; } # Consent API location /consent/api { - try_files $uri @proxy_to_lms_app; + try_files $uri @proxy_to_lms_app_api; } # Need a separate location for the image uploads endpoint to limit upload sizes location ~ ^/api/profile_images/[^/]*/[^/]*/upload$ { - try_files $uri @proxy_to_lms_app; + try_files $uri @proxy_to_lms_app_api; client_max_body_size {{ EDXAPP_PROFILE_IMAGE_MAX_BYTES + 1000 }}; } location /notifier_api { - try_files $uri @proxy_to_lms_app; + try_files $uri @proxy_to_lms_app_api; } location /user_api { - try_files $uri @proxy_to_lms_app; + try_files $uri @proxy_to_lms_app_api; } # No basic auth security on the github_service_hook url, so that github can use it for cms @@ -256,17 +251,36 @@ error_page {{ k }} {{ v }}; try_files $uri @proxy_to_lms_app; } + # No basic auth on the XBlock View endpoint, which can use OAuth2 + location ~ ^/courses/.*/xblock/.*/view/ { + try_files $uri @proxy_to_lms_app; + } + + # No basic auth on XBlock handlers, which can use OAuth2 + location ~ ^/courses/.*/xblock/.*/handler/ { + try_files $uri @proxy_to_lms_app; + } + # No basic auth security on assets location /c4x { try_files $uri @proxy_to_lms_app; } location /asset { + {% if EDXAPP_CORS_ORIGIN_WHITELIST|length > 0 %} + add_header Access-Control-Allow-Origin $cors_origin; + {% endif %} try_files $uri @proxy_to_lms_app; } # No basic auth security 
on the heartbeat url, so that ELB can use it location /heartbeat { + # If /edx/var/nginx/server-static/maintenance_heartbeat.txt exists serve an + # empty 200 so the instance stays in the load balancer to serve the + # maintenance page + if (-f /edx/var/nginx/server-static/maintenance_heartbeat.txt) { + return 200; + } try_files $uri @proxy_to_lms_app; } @@ -303,9 +317,11 @@ location ~ ^{{ EDXAPP_MEDIA_URL }}/(?P.*) { expires {{ EDXAPP_PROFILE_IMAGE_MAX_AGE }}s; } -{% if NGINX_DJANGO_ADMIN_ACCESS_CIDRS and EDXAPP_ENABLE_DJANGO_ADMIN_RESTRICTION %} +{% if NGINX_ADMIN_ACCESS_CIDRS and EDXAPP_ENABLE_DJANGO_ADMIN_RESTRICTION %} location /admin { - {% for cidr in NGINX_DJANGO_ADMIN_ACCESS_CIDRS %} + real_ip_header X-Forwarded-For; + set_real_ip_from {{ NGINX_TRUSTED_IP_CIDRS }}; + {% for cidr in NGINX_ADMIN_ACCESS_CIDRS %} allow {{ cidr }}; {% endfor %} deny all; diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms_proxy.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms_proxy.j2 new file mode 100644 index 00000000000..d045bfb90b5 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms_proxy.j2 @@ -0,0 +1,19 @@ +{% if NGINX_SET_X_FORWARDED_HEADERS %} + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $remote_addr; +{% else %} + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header X-Forwarded-Port $http_x_forwarded_port; + proxy_set_header X-Forwarded-For $http_x_forwarded_for; +{% endif %} + + # newrelic-specific header records the time when nginx handles a request. 
+ proxy_set_header X-Queue-Start "t=${msec}"; + + proxy_set_header Host $http_host; + + proxy_redirect off; + proxy_pass http://lms-backend; + + {{ NGINX_EDXAPP_LMS_APP_EXTRA }} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/maps.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/maps.j2 index b4ad2608166..c4639dda84e 100644 --- a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/maps.j2 +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/maps.j2 @@ -1,4 +1,8 @@ # nginx maps are defined at the top level and are global +# +# THESE ARE GLOBAL TO ALL IDAs, USE CAUTION WHEN DEFINING HERE +# SEE https://github.com/edx/configuration/pull/5056 FOR A +# CAUTIONARY TALE # cache header for static files map $status $cache_header_long_lived { diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/program_manager.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/program_manager.j2 new file mode 100644 index 00000000000..10521468a21 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/program_manager.j2 @@ -0,0 +1,26 @@ +{%- if "program_manager" in nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} + +server { + listen {{ PROGRAM_MANAGER_NGINX_PORT }} {{ default_site }}; + server_name ~^((stage|prod)-)?program-manager.*; + location / { + root /edx/app/program-manager/program-manager/dist; + index index.html; + } +} + +server { + listen {{ PROGRAM_MANAGER_SSL_NGINX_PORT }} ssl; + server_name ~^((stage|prod)-)?program-manager.*; + ssl_certificate /etc/ssl/certs/wildcard.sandbox.edx.org.pem; + ssl_certificate_key /etc/ssl/private/wildcard.sandbox.edx.org.key; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; + location / { + root /edx/app/program-manager/program-manager/dist; + index index.html; + } +} diff --git 
a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/prospectus.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/prospectus.j2 new file mode 100644 index 00000000000..cef2b1b97a1 --- /dev/null +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/prospectus.j2 @@ -0,0 +1,114 @@ +{%- if "prospectus" in nginx_default_sites -%} + {%- set default_site = "default_server" -%} +{%- else -%} + {%- set default_site = "" -%} +{%- endif -%} + +server { + # Prospectus configuration file for nginx, templated by ansible + + {% if NGINX_PROSPECTUS_PROXY_INTERCEPT_ERRORS %} + proxy_intercept_errors on; + {% endif %} + + listen {{ PROSPECTUS_NGINX_PORT }} {{ default_site }}; + + root {{ PROSPECTUS_DATA_DIR }}; + + # Ignore the rollout group headers for the health check endpoint. + location /HealthCheck { + try_files $uri $uri/index.html @drupal; + } + + location /admin/ { + add_header 'Cache-Control' 'public, max-age=0, must-revalidate'; + try_files $uri @drupal; + } + + location /static/ { + add_header 'Cache-Control' 'public, max-age=31536000, immutable'; + } + + location /preview/ { + {% if PROSPECTUS_PREVIEW_ENABLE_BASIC_AUTH|bool %} + auth_basic "Restricted"; + auth_basic_user_file {{ nginx_htpasswd_file }}; + {% endif %} + port_in_redirect off; + } + + location /es/preview/ { + {% if PROSPECTUS_PREVIEW_ENABLE_BASIC_AUTH|bool %} + auth_basic "Restricted"; + auth_basic_user_file {{ nginx_htpasswd_file }}; + {% endif %} + port_in_redirect off; + } + + # PROSPECTUS_TEMPORARY_REDIRECTS will be a list of dictionaries which have: + # - from_path: The path of the course-about page that you want redirect + # - to_path: URL to redirect to + {% for redirect in PROSPECTUS_TEMPORARY_REDIRECTS %} + location = {{ redirect.from_path }} { + include fastcgi_params; + fastcgi_param SERVER_PORT 80; + port_in_redirect off; + return 302 {{ redirect.to_path }}; + } + {% endfor %} + + location / { + location ~ \.(js|css)$ { + add_header 'Cache-Control' 
'public, max-age=31536000, immutable'; + try_files $uri @drupal; + } + + if ($http_x_content_source ~ "drupal") { + proxy_pass {{ PROSPECTUS_PROXY_PASS }}; + } + + {% if prospectus_redirect_file is defined %} + include fastcgi_params; + fastcgi_param SERVER_PORT 80; + include {{ prospectus_redirect_file }}; + port_in_redirect off; + + {% endif %} + + try_files $uri @index; + } + + # PROSPECTUS_STATIC_SITES will be a list of dictionaries which have a: + # - router_path: The path you will go to on the router to access the content + # - proxied_path: The path to proxy the requests to + {% for static_site in PROSPECTUS_STATIC_SITES %} + + # Matches: // + location = /{{ static_site.router_path }}/ { + proxy_pass {{ static_site.proxied_path }}/index.html; + } + + # Matches: /[.../] + location ~ ^/{{ static_site.router_path }}/((?:\w+\/+)*)([\w\-\.]+\.[\w\-\.]+) { + proxy_pass {{ static_site.proxied_path }}/$1$2; + } + + # Matches: //[.../] + location ~ ^/{{ static_site.router_path }}/([a-z0-9-]+)[/]? 
{ + proxy_pass {{ static_site.proxied_path }}/$1/index.html; + } + + {% endfor %} + + location @index { + add_header 'Cache-Control' 'public, max-age=0, must-revalidate'; + try_files $uri/index.html @drupal; + } + + location @drupal { + add_header 'Cache-Control' ''; + proxy_pass {{ PROSPECTUS_PROXY_PASS }}; + proxy_set_header Host $host; + } + +} diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/static-files.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/static-files.j2 index 18d57aad6d5..a1b9deb28b2 100644 --- a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/static-files.j2 +++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/static-files.j2 @@ -8,6 +8,10 @@ root {{ edxapp_data_dir }}; try_files /customer_themes/$file /staticfiles/$file /course_static/$file =404; +{% if EDXAPP_CORS_ORIGIN_WHITELIST|length > 0 %} + add_header Access-Control-Allow-Origin $cors_origin; +{% endif %} + # return a 403 for static files that shouldn't be # in the staticfiles directory location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) { @@ -17,7 +21,13 @@ # http://www.red-team-design.com/firefox-doesnt-allow-cross-domain-fonts-by-default location ~ "/static/(?P.*\.[0-9a-f]{12}\.(eot|otf|ttf|woff|woff2)$)" { add_header "Cache-Control" $cache_header_long_lived always; - add_header Access-Control-Allow-Origin *; + + # Prevent the browser from doing MIME-type sniffing + add_header X-Content-Type-Options nosniff; + +{% if EDXAPP_CORS_ORIGIN_WHITELIST|length > 0 %} + add_header Access-Control-Allow-Origin $cors_origin; +{% endif %} try_files /staticfiles/$collected /course_static/$collected =404; } diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/xqueue.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/xqueue.j2 index 7a5abc57b24..e521127c1eb 100644 --- a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/xqueue.j2 +++ 
b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/xqueue.j2 @@ -7,8 +7,28 @@ upstream xqueue_app_server { server { listen {{ XQUEUE_NGINX_PORT }} default_server; + {% if NGINX_ENABLE_SSL %} + listen {{ XQUEUE_NGINX_SSL_PORT }} ssl; + {% include "common-settings.j2" %} + access_log {{ nginx_log_dir }}/access.log {{ NGINX_LOG_FORMAT_NAME }}; + error_log {{ nginx_log_dir }}/error.log error; + + ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}; + ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }}; + {% endif %} + + {% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %} + # request the browser to use SSL for all connections + add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}"; + {% endif %} + + # set xqueue upload limit to 20MB to match the LMS upload limit. + client_max_body_size 20M; + + {% include "handle-tls-redirect-and-ip-disclosure.j2" %} + location / { {% if XQUEUE_ENABLE_BASIC_AUTH|bool %} {% include "basic-auth.j2" %} diff --git a/playbooks/roles/nginx/templates/etc/nginx/nginx.conf.j2 b/playbooks/roles/nginx/templates/etc/nginx/nginx.conf.j2 index eca8fc3a6a2..24e621fb65e 100644 --- a/playbooks/roles/nginx/templates/etc/nginx/nginx.conf.j2 +++ b/playbooks/roles/nginx/templates/etc/nginx/nginx.conf.j2 @@ -18,9 +18,14 @@ http { tcp_nodelay on; keepalive_timeout 65; types_hash_max_size 2048; + {% if NGINX_OVERRIDE_DEFAULT_MAP_HASH_SIZE %} + map_hash_max_size {{ NGINX_MAP_HASH_MAX_SIZE }}; + map_hash_bucket_size {{ NGINX_MAP_HASH_BUCKET_SIZE }}; + {% endif %} # increase header buffer for for https://edx-wiki.atlassian.net/browse/LMS-467> # see http://orensol.com/2009/01/18/nginx-and-weird-400-bad-request-responses/ large_client_header_buffers 4 16k; + server_tokens off; server_names_hash_bucket_size {{ nginx_server_names_hash_bucket_size }}; map_hash_bucket_size {{ nginx_map_hash_bucket_size }}; @@ -33,7 +38,7 @@ http { # Logging Settings ## - log_format {{ NGINX_LOG_FORMAT_NAME }} 
'$http_x_forwarded_for - $remote_addr - $remote_user [$time_local] ' + log_format p_combined '$http_x_forwarded_for - $remote_addr - $remote_user $http_x_forwarded_proto [$time_local] ' '"$request" $status $body_bytes_sent $request_time ' '"$http_referer" "$http_user_agent"'; @@ -56,6 +61,14 @@ http { access_log {{ nginx_log_dir }}/access.log p_combined; access_log {{ nginx_log_dir }}/access_json.log json_combined; + + log_format app_metrics 'time=$time_iso8601 client=$remote_addr method=$request_method request="$request" ' + 'request_length=$request_length status=$status bytes_sent=$bytes_sent body_bytes_sent=$body_bytes_sent ' + 'referer=$http_referer user_agent="$http_user_agent" upstream_addr=$upstream_addr upstream_status=$upstream_status ' + 'request_time=$request_time request_id=$request_id upstream_response_time=$upstream_response_time ' + 'upstream_connect_time=$upstream_connect_time upstream_header_time=$upstream_header_time'; + + access_log {{ nginx_log_dir }}/access_metrics.log app_metrics; error_log {{ nginx_log_dir }}/error.log; ## diff --git a/playbooks/roles/nltk/defaults/main.yml b/playbooks/roles/nltk/defaults/main.yml index 10cc06984de..9e439d6acc3 100644 --- a/playbooks/roles/nltk/defaults/main.yml +++ b/playbooks/roles/nltk/defaults/main.yml @@ -11,3 +11,5 @@ NLTK_DATA: url: "http://nltk.github.io/nltk_data/packages/corpora/stopwords.zip" } - { path: "corpora/wordnet", url: "http://nltk.github.io/nltk_data/packages/corpora/wordnet.zip" } + +NLTK_DOWNLOAD_TIMEOUT: 100 diff --git a/playbooks/roles/nltk/tasks/main.yml b/playbooks/roles/nltk/tasks/main.yml index 436deda0b7b..ced54c4f538 100644 --- a/playbooks/roles/nltk/tasks/main.yml +++ b/playbooks/roles/nltk/tasks/main.yml @@ -13,6 +13,7 @@ get_url: dest: "{{ NLTK_DATA_DIR }}/{{ item.url|basename }}" url: "{{ item.url }}" + timeout: "{{ NLTK_DOWNLOAD_TIMEOUT }}" with_items: "{{ NLTK_DATA }}" register: nltk_download tags: diff --git a/playbooks/roles/notifier/defaults/main.yml 
b/playbooks/roles/notifier/defaults/main.yml index d5f66f46b58..9f6f06f2cbd 100644 --- a/playbooks/roles/notifier/defaults/main.yml +++ b/playbooks/roles/notifier/defaults/main.yml @@ -4,9 +4,10 @@ NOTIFIER_VENV_DIR: "{{ notifier_app_dir }}/virtualenvs/notifier" NOTIFIER_SOURCE_REPO: "https://github.com/edx/notifier.git" NOTIFIER_CODE_DIR: "{{ notifier_app_dir }}/src" NOTIFIER_VERSION: "master" -NOTIFIER_REQUIREMENTS_FILE: "{{ NOTIFIER_CODE_DIR }}/requirements.txt" +NOTIFIER_REQUIREMENTS_FILE: "{{ NOTIFIER_CODE_DIR }}/requirements/base.txt" NOTIFIER_LOG_LEVEL: "INFO" NOTIFIER_RSYSLOG_ENABLED: "yes" +NOTIFIER_POLLING_INTERVAL: "5" NOTIFIER_DIGEST_TASK_INTERVAL: "1440" NOTIFIER_FORUM_DIGEST_TASK_BATCH_SIZE: "5" NOTIFIER_FORUM_DIGEST_TASK_RATE_LIMIT: "60/m" @@ -22,7 +23,6 @@ NOTIFIER_DATABASE_PORT: "" NOTIFIER_THEME_NAME: "" NOTIFIER_THEME_REPO: "" NOTIFIER_THEME_VERSION: "master" -notifier_git_ssh: "/tmp/notifier_git_ssh.sh" NOTIFIER_GIT_IDENTITY: "" notifier_git_identity: "{{ notifier_app_dir }}/notifier-git-identity" @@ -57,6 +57,7 @@ NOTIFIER_USER_SERVICE_BASE: "http://localhost:8000" NOTIFIER_USER_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE" NOTIFIER_USER_SERVICE_HTTP_AUTH_USER: "" NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS: "" +NOTIFIER_USER_SERVICE_RESULT_PAGE_SIZE: 40 NOTIFIER_CELERY_BROKER_URL: "django://" NOTIFIER_LOGO_IMAGE_URL: "{{ NOTIFIER_LMS_URL_BASE }}/static/images/default-theme/logo.png" NOTIFIER_SUPERVISOR_LOG_DEST: "{{ COMMON_DATA_DIR }}/log/supervisor" @@ -116,6 +117,7 @@ notifier_env_vars: CS_API_KEY: "{{ NOTIFIER_COMMENT_SERVICE_API_KEY }}" US_URL_BASE: "{{ NOTIFIER_USER_SERVICE_BASE }}" US_API_KEY: "{{ NOTIFIER_USER_SERVICE_API_KEY }}" + US_RESULT_PAGE_SIZE: "{{ NOTIFIER_USER_SERVICE_RESULT_PAGE_SIZE }}" DATADOG_API_KEY: "{{ DATADOG_API_KEY|default('') }}" LOG_LEVEL: "{{ NOTIFIER_LOG_LEVEL }}" RSYSLOG_ENABLED: "{{ NOTIFIER_RSYSLOG_ENABLED }}" diff --git a/playbooks/roles/notifier/tasks/deploy.yml b/playbooks/roles/notifier/tasks/deploy.yml index 
7debe60a507..19c705515a9 100644 --- a/playbooks/roles/notifier/tasks/deploy.yml +++ b/playbooks/roles/notifier/tasks/deploy.yml @@ -11,29 +11,6 @@ - "install" - "install:code" -# Optional auth for git -- name: Create ssh script for git (not authenticated) - template: - src: "git_ssh_noauth.sh.j2" - dest: "{{ notifier_git_ssh }}" - owner: "{{ notifier_user }}" - mode: "0750" - when: NOTIFIER_GIT_IDENTITY == "" - tags: - - "install" - - "install:code" - -- name: Create ssh script for git (authenticated) - template: - src: "git_ssh_auth.sh.j2" - dest: "{{ notifier_git_ssh }}" - owner: "{{ notifier_user }}" - mode: "0750" - when: NOTIFIER_GIT_IDENTITY != "" - tags: - - "install" - - "install:code" - - name: Install read-only ssh key copy: content: "{{ NOTIFIER_GIT_IDENTITY }}" @@ -52,10 +29,9 @@ repo: "{{ NOTIFIER_THEME_REPO }}" version: "{{ NOTIFIER_THEME_VERSION }}" accept_hostkey: yes + key_file: "{% if NOTIFIER_GIT_IDENTITY != '' %}{{ notifier_git_identity }}{% endif %}" when: NOTIFIER_THEME_NAME != '' become_user: "{{ notifier_user }}" - environment: - GIT_SSH: "{{ notifier_git_ssh }}" tags: - "install" - "install:code" @@ -65,7 +41,6 @@ src: "settings_local.py.j2" dest: "{{ NOTIFIER_CODE_DIR }}/notifier/settings_local.py" mode: "0555" - when: NOTIFIER_THEME_NAME != '' tags: - "install" - "install:configuration" diff --git a/playbooks/roles/notifier/tasks/main.yml b/playbooks/roles/notifier/tasks/main.yml index 6a2f2caa1e5..c93031363b4 100644 --- a/playbooks/roles/notifier/tasks/main.yml +++ b/playbooks/roles/notifier/tasks/main.yml @@ -18,10 +18,9 @@ # - name: Install notifier specific system packages apt: - name: "{{ item }}" + name: "{{ notifier_debian_pkgs }}" state: present update_cache: yes - with_items: "{{ notifier_debian_pkgs }}" tags: - "install" - "install:system-requirements" diff --git a/playbooks/roles/notifier/templates/edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2 
b/playbooks/roles/notifier/templates/edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2 index 6f56c07d701..6a4ca0214ed 100644 --- a/playbooks/roles/notifier/templates/edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2 +++ b/playbooks/roles/notifier/templates/edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2 @@ -7,10 +7,9 @@ environment={% for name,value in notifier_env_vars.items() -%} {{ name }}="{{ value }}", {%- endfor -%} - PID="/var/tmp/notifier-scheduler.pid",LANG="en_US.UTF-8" - {%- if NOTIFIER_THEME_NAME != "" %},DJANGO_SETTINGS_MODULE="notifier.settings_local"{% endif %} + PID="/var/tmp/notifier-scheduler.pid",LANG="en_US.UTF-8",DJANGO_SETTINGS_MODULE="notifier.settings_local" -command={{ NOTIFIER_VENV_DIR }}/bin/python manage.py celery worker -l {{ NOTIFIER_LOG_LEVEL }} +command={{ NOTIFIER_VENV_DIR }}/bin/python manage.py celery worker -l {{ NOTIFIER_LOG_LEVEL }} --hostname=notifier.%%h process_name=%(program_name)s numprocs=1 diff --git a/playbooks/roles/notifier/templates/git_ssh_auth.sh.j2 b/playbooks/roles/notifier/templates/git_ssh_auth.sh.j2 deleted file mode 100644 index 355c0e72fb8..00000000000 --- a/playbooks/roles/notifier/templates/git_ssh_auth.sh.j2 +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ notifier_git_identity }} "$@" diff --git a/playbooks/roles/notifier/templates/git_ssh_noauth.sh.j2 b/playbooks/roles/notifier/templates/git_ssh_noauth.sh.j2 deleted file mode 100644 index e30af2deeb1..00000000000 --- a/playbooks/roles/notifier/templates/git_ssh_noauth.sh.j2 +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no "$@" diff --git a/playbooks/roles/notifier/templates/notifier_env.j2 b/playbooks/roles/notifier/templates/notifier_env.j2 index d0a6b53cfaf..00127a18537 100644 --- a/playbooks/roles/notifier/templates/notifier_env.j2 +++ b/playbooks/roles/notifier/templates/notifier_env.j2 @@ -9,6 +9,6 @@ export {{ name }}="{{ value }}" {% 
endif %} {% endfor %} -{% if NOTIFIER_THEME_NAME != "" %} + export DJANGO_SETTINGS_MODULE=notifier.settings_local -{% endif %} + diff --git a/playbooks/roles/notifier/templates/settings_local.py.j2 b/playbooks/roles/notifier/templates/settings_local.py.j2 index b1f36d73039..6a9f1764fa1 100644 --- a/playbooks/roles/notifier/templates/settings_local.py.j2 +++ b/playbooks/roles/notifier/templates/settings_local.py.j2 @@ -1,6 +1,16 @@ from .settings import * +# This file extends notifier's settings + +{% if NOTIFIER_DIGEST_EMAIL_SUBJECT != "" %} FORUM_DIGEST_EMAIL_SUBJECT = '{{ NOTIFIER_DIGEST_EMAIL_SUBJECT }}' +{% endif %} +{% if NOTIFIER_THEME_NAME != "" %} CUSTOM_THEME_DIR = '{{ NOTIFIER_CODE_DIR }}/{{ NOTIFIER_THEME_NAME }}/' TEMPLATE_DIRS = (CUSTOM_THEME_DIR + 'templates',) +{% endif %} + +{% if NOTIFIER_POLLING_INTERVAL != "" %} +KOMBU_POLLING_INTERVAL = {{ NOTIFIER_POLLING_INTERVAL }} +{% endif %} diff --git a/playbooks/roles/oauth2_proxy/defaults/main.yml b/playbooks/roles/oauth2_proxy/defaults/main.yml index 0d242d33dd6..912662f529b 100644 --- a/playbooks/roles/oauth2_proxy/defaults/main.yml +++ b/playbooks/roles/oauth2_proxy/defaults/main.yml @@ -6,8 +6,9 @@ oauth2_proxy_user: "oauth2_proxy" # We define this tuple here separately because we need to know it for downloading the right tarball. Given that they # bake in both the version number -- which doesn't always match the actual Git tag they release off -- and the Go version, -# it's nearly impossible to use only `oauth2_proxy_version` to build a valid URL. -oauth2_proxy_version: "2.2.0" +# it's nearly impossible to use only `OAUTH2_PROXY_VERSION` to build a valid URL. 
+OAUTH2_PROXY_VERSION: "2.2.0" +oauth2_proxy_version: "{{ OAUTH2_PROXY_VERSION }}" oauth2_proxy_version_tuple: "2.2.0.linux-amd64.go1.8.1" oauth2_proxy_pkg_name: "oauth2_proxy-{{ oauth2_proxy_version_tuple }}" oauth2_proxy_release_url: "https://github.com/bitly/oauth2_proxy/releases/download/v2.2/{{ oauth2_proxy_pkg_name }}.tar.gz" diff --git a/playbooks/roles/oauth2_proxy/tasks/tag_ec2.yml b/playbooks/roles/oauth2_proxy/tasks/tag_ec2.yml index 1896234a269..f8e32001fde 100644 --- a/playbooks/roles/oauth2_proxy/tasks/tag_ec2.yml +++ b/playbooks/roles/oauth2_proxy/tasks/tag_ec2.yml @@ -1,6 +1,6 @@ --- - name: get instance information - action: ec2_facts + action: ec2_metadata_facts - name: tag instance ec2_tag: diff --git a/playbooks/roles/oauth_client_setup/defaults/main.yml b/playbooks/roles/oauth_client_setup/defaults/main.yml index bc75eba6ce5..9dad5cb6e9c 100644 --- a/playbooks/roles/oauth_client_setup/defaults/main.yml +++ b/playbooks/roles/oauth_client_setup/defaults/main.yml @@ -14,14 +14,17 @@ # # vars are namespaced with the module name. 
# +# oauth_client_setup_role_name: oauth_client_setup oauth_client_setup_oauth2_clients: - { name: "{{ ecommerce_service_name | default('None') }}", url_root: "{{ ECOMMERCE_ECOMMERCE_URL_ROOT | default('None') }}", - id: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}", - secret: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}", + sso_id: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ ECOMMERCE_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ ECOMMERCE_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", logout_uri: "{{ ECOMMERCE_LOGOUT_URL | default('None') }}", username: "{{ ECOMMERCE_SERVICE_USER_NAME | default('None') }}", } @@ -30,42 +33,65 @@ oauth_client_setup_oauth2_clients: url_root: "{{ INSIGHTS_BASE_URL | default('None') }}", id: "{{ INSIGHTS_OAUTH2_KEY | default('None') }}", secret: "{{ INSIGHTS_OAUTH2_SECRET | default('None') }}", + sso_id: "{{ INSIGHTS_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ INSIGHTS_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ INSIGHTS_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ INSIGHTS_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", logout_uri: "{{ INSIGHTS_LOGOUT_URL | default('None') }}", username: "{{ INSIGHTS_SERVICE_USER_NAME | default('None') }}", } - { name: "{{ credentials_service_name | default('None') }}", url_root: "{{ CREDENTIALS_URL_ROOT | default('None') }}", - id: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}", - secret: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}", + sso_id: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ CREDENTIALS_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ 
CREDENTIALS_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ CREDENTIALS_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", logout_uri: "{{ CREDENTIALS_LOGOUT_URL | default('None') }}", username: "{{ CREDENTIALS_SERVICE_USER_NAME | default('None') }}", } - { name: "{{ discovery_service_name | default('None') }}", url_root: "{{ DISCOVERY_URL_ROOT | default('None') }}", - id: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}", - secret: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}", + sso_id: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ DISCOVERY_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ DISCOVERY_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ DISCOVERY_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", logout_uri: "{{ DISCOVERY_LOGOUT_URL | default('None') }}", username: "{{ DISCOVERY_SERVICE_USER_NAME | default('None') }}", } - - { - name: "{{ journals_service_name | default('None') }}", - url_root: "{{ JOURNALS_URL_ROOT | default('None') }}", - id: "{{ JOURNALS_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}", - secret: "{{ JOURNALS_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}", - logout_uri: "{{ JOURNALS_LOGOUT_URL | default('None') }}", - username: "{{ JOURNALS_SERVICE_USER_NAME | default('None') }}", - } - { name: "{{ veda_web_frontend_service_name | default('None') }}", url_root: "{{ VEDA_WEB_FRONTEND_OAUTH2_URL | default('None') }}", - id: "{{ VEDA_WEB_FRONTEND_SOCIAL_AUTH_EDX_OIDC_KEY | default('None') }}", - secret: "{{ VEDA_WEB_FRONTEND_SOCIAL_AUTH_EDX_OIDC_SECRET | default('None') }}", logout_uri: "{{ VEDA_WEB_FRONTEND_LOGOUT_URL | default('None') }}", username: "{{ EDXAPP_VEDA_SERVICE_USER_NAME | default('None') }}" } - + - { + name: "{{ registrar_service_name | default('None') }}", + url_root: "{{ REGISTRAR_URL_ROOT | default('None') }}", + sso_id: "{{ 
REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ REGISTRAR_LOGOUT_URL | default('None') }}", + username: "{{ REGISTRAR_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ designer_service_name | default('None') }}", + url_root: "{{ DESIGNER_URL_ROOT | default('None') }}", + sso_id: "{{ DESIGNER_SOCIAL_AUTH_EDX_OAUTH2_KEY | default('None') }}", + sso_secret: "{{ DESIGNER_SOCIAL_AUTH_EDX_OAUTH2_SECRET | default('None') }}", + backend_service_id: "{{ DESIGNER_BACKEND_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ DESIGNER_BACKEND_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + logout_uri: "{{ DESIGNER_LOGOUT_URL | default('None') }}", + username: "{{ DESIGNER_SERVICE_USER_NAME | default('None') }}", + } + - { + name: "{{ retirement_service_name if COMMON_RETIREMENT_SERVICE_SETUP|default(false)|bool else 'None' }}", + backend_service_id: "{{ RETIREMENT_SERVICE_EDX_OAUTH2_KEY | default('None') }}", + backend_service_secret: "{{ RETIREMENT_SERVICE_EDX_OAUTH2_SECRET | default('None') }}", + username: "{{ EDXAPP_RETIREMENT_SERVICE_USER_NAME | default('None') }}", + } # # OS packages # diff --git a/playbooks/roles/oauth_client_setup/tasks/main.yml b/playbooks/roles/oauth_client_setup/tasks/main.yml index 6fa3ce4c614..9032e9871fe 100644 --- a/playbooks/roles/oauth_client_setup/tasks/main.yml +++ b/playbooks/roles/oauth_client_setup/tasks/main.yml @@ -21,21 +21,47 @@ # # -- name: create OAuth2 Clients +- name: Create OAuth2 django-oauth-toolkit SSO Applications shell: > {{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms --settings={{ COMMON_EDXAPP_SETTINGS }} - create_oauth2_client - {{ item.url_root }} - "{{ 
item.url_root }}/complete/edx-oidc/" - confidential - --client_name {{ item.name }} - --client_id {{ item.id }} - --client_secret {{ item.secret }} - --trusted - --logout_uri {{ item.logout_uri | default("") }} - {% if item.username is defined %} --username {{ item.username }} {% endif %} + create_dot_application + --grant-type authorization-code + --redirect-uris "{{ item.url_root }}/complete/edx-oauth2/" + --client-id {{ item.sso_id }} + --client-secret {{ item.sso_secret }} + --scopes user_id + --skip-authorization + --update + {{ item.name }}-sso + {{ item.username }} become_user: "{{ edxapp_user }}" environment: "{{ edxapp_environment }}" with_items: "{{ oauth_client_setup_oauth2_clients }}" - when: item.name != 'None' + when: + - item.name != 'None' + - item.sso_id is defined + - item.sso_id != 'None' + - item.sso_secret is defined + - item.sso_secret != 'None' + +- name: Create OAuth2 django-oauth-toolkit Backend Service Applications + shell: > + {{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms --settings={{ COMMON_EDXAPP_SETTINGS }} + create_dot_application + --grant-type client-credentials + --client-id {{ item.backend_service_id }} + --client-secret {{ item.backend_service_secret }} + --scopes user_id + --update + {{ item.name }}-backend-service + {{ item.username }} + become_user: "{{ edxapp_user }}" + environment: "{{ edxapp_environment }}" + with_items: "{{ oauth_client_setup_oauth2_clients }}" + when: + - item.name != 'None' + - item.backend_service_id is defined + - item.backend_service_id != 'None' + - item.backend_service_secret is defined + - item.backend_service_secret != 'None' tags: ["oauth_client_setup"] diff --git a/playbooks/roles/oraclejdk/defaults/main.yml b/playbooks/roles/oraclejdk/defaults/main.yml index 4a7b016ee8b..bce6457e635 100644 --- a/playbooks/roles/oraclejdk/defaults/main.yml +++ b/playbooks/roles/oraclejdk/defaults/main.yml @@ -1,6 +1,7 @@ --- -oraclejdk_version: "8u131" +ORACLEJDK_VERSION: "8u131" 
+oraclejdk_version: "{{ ORACLEJDK_VERSION }}" # what the archive unpacks to oraclejdk_base: "jdk1.8.0_131" oraclejdk_build: "b11" diff --git a/playbooks/roles/program_manager/defaults/main.yml b/playbooks/roles/program_manager/defaults/main.yml new file mode 100644 index 00000000000..56cd52b608a --- /dev/null +++ b/playbooks/roles/program_manager/defaults/main.yml @@ -0,0 +1,64 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# + +PROGRAM_MANAGER_URL_ROOT: !!null +PROGRAM_MANAGER_LMS_BASE_URL: !!null +PROGRAM_MANAGER_REGISTRAR_API_BASE_URL: !!null +PROGRAM_MANAGER_DISCOVERY_BASE_URL: !!null +PROGRAM_MANAGER_NGINX_PORT: 80 +PROGRAM_MANAGER_SSL_NGINX_PORT: 443 + +program_manager_home: '{{ COMMON_APP_DIR }}/{{ program_manager_service_name }}' +NVM_DIR: '{{ program_manager_home }}' +program_manager_user: 'root' +program_manager_git_identity: 'none' +program_manager_repo: 'https://github.com/edx/frontend-app-program-manager.git' +PROGRAM_MANAGER_VERSION: 'master' +program_manager_version: "{{ PROGRAM_MANAGER_VERSION }}" +program_manager_service_name: 'program-manager' +PROGRAM_MANAGER_NODE_VERSION: '12.11.1' +program_manager_node_version: "{{ PROGRAM_MANAGER_NODE_VERSION }}" +program_manager_nodeenv_dir: '{{ program_manager_home }}/nodeenvs/{{ program_manager_service_name }}' +program_manager_nodeenv_bin: '{{program_manager_nodeenv_dir}}/bin' +program_manager_app_dir: "{{ COMMON_APP_DIR }}/program-manager" +program_manager_code_dir: "{{ program_manager_app_dir }}/program-manager" +program_manager_dist_dir: "{{ program_manager_code_dir }}/dist" +program_manager_env_vars: + PATH: "{{ program_manager_nodeenv_bin }}:{{ ansible_env.PATH }}" + NODE_ENV: "production" + ACTIVE_ENV: "production" + BASE_URL: "{{ 
PROGRAM_MANAGER_URL_ROOT }}" + LMS_BASE_URL: "{{ PROGRAM_MANAGER_LMS_BASE_URL }}" + REGISTRAR_API_BASE_URL: "{{ PROGRAM_MANAGER_REGISTRAR_API_BASE_URL }}" + DISCOVERY_BASE_URL: "{{ PROGRAM_MANAGER_DISCOVERY_BASE_URL }}" + LOGIN_URL: '{{ COMMON_LMS_BASE_URL }}/login' + LOGOUT_URL: '{{ COMMON_LMS_BASE_URL }}/logout' + CSRF_TOKEN_API_PATH: '/csrf/api/v1/token' + REFRESH_ACCESS_TOKEN_ENDPOINT: '{{ COMMON_LMS_BASE_URL }}/login_refresh' + ACCESS_TOKEN_COOKIE_NAME: 'edx-jwt-cookie-header-payload' + USER_INFO_COOKIE_NAME: 'edx-user-info' + MARKETING_SITE_BASE_URL: 'https://stage.edx.org' + SUPPORT_URL: 'https://stage.edx.org/support' + CONTACT_URL: 'https://stage.edx.org/contact' + OPEN_SOURCE_URL: 'https://stage.edx.org/openedx' + TERMS_OF_SERVICE_URL: 'https://stage.edx.org/terms-of-service' + PRIVACY_POLICY_URL: 'https://stage.edx.org/privacy-policy' + FACEBOOK_URL: 'https://www.facebook.com' + TWITTER_URL: 'https://twitter.com' + YOU_TUBE_URL: 'https://www.youtube.com' + LINKED_IN_URL: 'https://www.linkedin.com' + GOOGLE_PLUS_URL: 'https://plus.google.com' + REDDIT_URL: 'https://www.reddit.com' + APPLE_APP_STORE_URL: 'https://www.apple.com/ios/app-store/' + GOOGLE_PLAY_URL: 'https://play.google.com/store' + SITE_NAME: '' + SEGMENT_KEY: '' diff --git a/playbooks/roles/program_manager/meta/main.yml b/playbooks/roles/program_manager/meta/main.yml new file mode 100644 index 00000000000..3d12d718ea7 --- /dev/null +++ b/playbooks/roles/program_manager/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - nginx diff --git a/playbooks/roles/program_manager/tasks/main.yml b/playbooks/roles/program_manager/tasks/main.yml new file mode 100644 index 00000000000..afdd949d8b3 --- /dev/null +++ b/playbooks/roles/program_manager/tasks/main.yml @@ -0,0 +1,89 @@ +- name: Remove old git repo + file: + state: absent + path: "{{ program_manager_code_dir }}/" + +- name: Remove old app repo + file: + state: absent + path: "{{ program_manager_app_dir }}" + +- name: Create 
program-manager app folder + file: + path: "{{ program_manager_app_dir }}" + state: directory + owner: "{{ program_manager_user }}" + group: "{{ program_manager_user }}" + +- name: Checkout program-manager repo into {{ program_manager_code_dir }} + git: + dest: "{{ program_manager_code_dir }}" + repo: "{{ program_manager_repo }}" + version: "{{ program_manager_version }}" + accept_hostkey: yes + become_user: "{{ program_manager_user }}" + register: program_manager_checkout + + +# Use apt to install nodeenv, so we can use nodeenv to install nodejs +- name: install nodenv by using apt + apt: + name: nodeenv + tags: + - install + - install:system-requirements + +# Install node +- name: install nodejs + shell: "nodeenv {{ program_manager_nodeenv_dir }} --node={{ program_manager_node_version }} --prebuilt --force" + become_user: "{{ program_manager_user }}" + environment: "{{ program_manager_env_vars }}" + tags: + - install + - install:system-requirements + +# Set the npm registry +# This needs to be done as root since npm is weird about +# chown - https://github.com/npm/npm/issues/3565 +- name: Set the npm registry + shell: "{{ program_manager_nodeenv_bin }}/npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'" + args: + creates: "{{ program_manager_code_dir }}/.npmrc" + environment: "{{ program_manager_env_vars }}" + become_user: "{{ program_manager_user }}" + tags: + - install + - install:app-requirements + +#we need to do this so that npm can find a node install to use to build node-sass +- name: prepend node path + shell: "{{ program_manager_nodeenv_bin }}/npm config set scripts-prepend-node-path true" + environment: "{{ program_manager_env_vars }}" + become_user: "{{ program_manager_user }}" + tags: + - install + - install:app-requirements + +#install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json +#The version of ansible we are using also does not make use of "--unsafe-perm", which we need for node-sass +- 
name: install node dependencies + shell: "sudo {{ program_manager_nodeenv_bin }}/node {{ program_manager_nodeenv_bin }}/npm i --unsafe-perm" + args: + chdir: "{{ program_manager_code_dir }}" + environment: "{{ program_manager_env_vars }}" + become: true + become_method: sudo + tags: + - install + - install:app-requirements + +#install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json +- name: run program-manager build + shell: "npm run build" + args: + chdir: "{{ program_manager_code_dir }}" + environment: "{{ program_manager_env_vars }}" + become_user: "{{ program_manager_user }}" + tags: + - install + - install:app-requirements diff --git a/playbooks/roles/prospectus/defaults/main.yml b/playbooks/roles/prospectus/defaults/main.yml new file mode 100644 index 00000000000..2e2d0e50b1a --- /dev/null +++ b/playbooks/roles/prospectus/defaults/main.yml @@ -0,0 +1,58 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role prospectus +# + +# .env vars +PROSPECTUS_ENVIRONMENT: !!null +PROSPECTUS_OAUTH_ID: !!null +PROSPECTUS_OAUTH_SECRET: !!null + +PROSPECTUS_NEW_RELIC_LICENSE_KEY: 'fake-key' +PROSPECTUS_NEW_RELIC_APP_ID: 'fake-id' + +PROSPECTUS_GREENHOUSE_API_TOKEN: 'fake-key' +PROSPECTUS_CONTENTFUL_SPACE_ID: 'fake-key' +PROSPECTUS_CONTENTFUL_ACCESS_TOKEN: 'fake-key' + +# nginx vars +PROSPECTUS_DATA_DIR: '/edx/var/prospectus' +PROSPECTUS_PROXY_PASS: 'http://localhost:8080' +NGINX_PROSPECTUS_PROXY_INTERCEPT_ERRORS: true +PROSPECTUS_STATIC_SITES: [] +PROSPECTUS_TEMPORARY_REDIRECTS: [] + +# task vars +PROSPECTUS_GIT_IDENTITY: "none" +prospectus_repo: 'ssh://git@github.com/edx/prospectus.git' +PROSPECTUS_VERSION: 'master' +prospectus_version: "{{ 
PROSPECTUS_VERSION }}" +edx_django_service_use_python3: false +PROSPECTUS_NODE_VERSION: '10.16.0' +prospectus_node_version: "{{ PROSPECTUS_NODE_VERSION }}" +prospectus_service_name: 'prospectus' +prospectus_home: '{{ COMMON_APP_DIR }}/{{ prospectus_service_name }}' +prospectus_venv_dir: '{{ prospectus_home }}/venvs/{{ prospectus_service_name }}' +prospectus_nodeenv_dir: '{{ prospectus_home }}/nodeenvs/{{ prospectus_service_name }}' +prospectus_nodeenv_bin: '{{prospectus_nodeenv_dir}}/bin' +prospectus_app_dir: "{{ COMMON_APP_DIR }}/prospectus" +prospectus_user: 'root' +prospectus_env_vars: + PATH: "{{ prospectus_nodeenv_bin }}:{{ prospectus_venv_dir }}/bin:{{ ansible_env.PATH }}" + NODE_ENV: "{{ PROSPECTUS_ENVIRONMENT }}" + ACTIVE_ENV: "{{ PROSPECTUS_ENVIRONMENT }}" + USE_COURSE_URL_SLUGS: "{{ PROSPECTUS_USE_COURSE_URL_SLUGS }}" + GATSBY_CONTENTFUL_SPACE_ID: "{{ PROSPECTUS_CONTENTFUL_SPACE_ID }}" + GATSBY_CONTENTFUL_ACCESS_TOKEN: "{{ PROSPECTUS_CONTENTFUL_ACCESS_TOKEN }}" + GATSBY_CSRF_TOKEN_API_PATH: '/csrf/api/v1/token' + GATSBY_ACCESS_TOKEN_COOKIE_NAME: 'edx-jwt-cookie-header-payload' +prospectus_git_identity: "{{ prospectus_app_dir }}/prospectus-git-identity" +prospectus_code_dir: "{{ prospectus_app_dir }}/prospectus" diff --git a/playbooks/roles/prospectus/meta/main.yml b/playbooks/roles/prospectus/meta/main.yml new file mode 100644 index 00000000000..3d12d718ea7 --- /dev/null +++ b/playbooks/roles/prospectus/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - common + - nginx diff --git a/playbooks/roles/prospectus/tasks/main.yml b/playbooks/roles/prospectus/tasks/main.yml new file mode 100644 index 00000000000..52f6821cae8 --- /dev/null +++ b/playbooks/roles/prospectus/tasks/main.yml @@ -0,0 +1,170 @@ +# Remove all of the old code +- name: Remove read-only ssh key for the prospectus repo + file: + path: "{{ prospectus_git_identity }}" + state: absent + when: PROSPECTUS_GIT_IDENTITY != "none" + +- name: Remove old git repo + file: + state: absent + path: "{{ 
prospectus_code_dir }}/" + +- name: Remove old app repo + file: + state: absent + path: "{{ prospectus_app_dir }}" + +- name: Remove data directory + file: + state: absent + path: "{{ PROSPECTUS_DATA_DIR }}" + +- name: Create prospectus app folder + file: + path: "{{ prospectus_app_dir }}" + state: directory + owner: "{{ prospectus_user }}" + group: "{{ prospectus_user }}" + +# This key is only needed if you are pulling down a private +# prospectus repo +- name: Install read-only ssh key for the prospectus repo + copy: + content: "{{ PROSPECTUS_GIT_IDENTITY }}" + dest: "{{ prospectus_git_identity }}" + force: yes + owner: "{{ prospectus_user }}" + mode: "0600" + when: PROSPECTUS_GIT_IDENTITY != "none" + +- name: "Checkout prospectus repo into {{ prospectus_code_dir }}" + git: + dest: "{{ prospectus_code_dir }}" + repo: "{{ prospectus_repo }}" + version: "{{ prospectus_version }}" + accept_hostkey: yes + key_file: "{{ prospectus_git_identity }}" + become_user: "{{ prospectus_user }}" + register: prospectus_checkout + when: PROSPECTUS_GIT_IDENTITY != "none" + +- name: Checkout prospectus repo into {{ prospectus_code_dir }} + git: + dest: "{{ prospectus_code_dir }}" + repo: "{{ prospectus_repo }}" + version: "{{ prospectus_version }}" + accept_hostkey: yes + become_user: "{{ prospectus_user }}" + register: prospectus_checkout + when: PROSPECTUS_GIT_IDENTITY == "none" + +- name: build virtualenv with python2.7 + command: "virtualenv --python=python2.7 {{ prospectus_venv_dir }}" + args: + creates: "{{ prospectus_venv_dir }}/bin/pip" + become_user: "{{ prospectus_user }}" + when: not edx_django_service_use_python3 + tags: + - install + - install:system-requirements + +- name: Pin pip to a specific version. 
+ command: "{{ prospectus_venv_dir }}/bin/pip install pip=={{ common_pip_version }}" + become_user: "{{ prospectus_user }}" + tags: + - install + - install:system-requirements + +- name: Add prospectus configuration file + template: + src: ".env.environment.j2" + dest: "{{ prospectus_code_dir }}/config/.env.keys" + mode: "0644" + owner: "{{ prospectus_user }}" + group: "{{ prospectus_user }}" + +# NOTE (CCB): Ideally we should use the pip Ansible command, +# but that doesn't seem to work with the Python 3.x virtualenv. +- name: install nodenv + command: pip install nodeenv + become_user: "{{ prospectus_user }}" + environment: "{{ prospectus_env_vars }}" + tags: + - install + - install:system-requirements + +# Install node +- name: create nodeenv + shell: "{{ prospectus_venv_dir }}/bin/nodeenv {{ prospectus_nodeenv_dir }} --node={{ prospectus_node_version }} --prebuilt --force" + tags: + - install + - install:system-requirements + +# Set the npm registry +# This needs to be done as root since npm is weird about +# chown - https://github.com/npm/npm/issues/3565 +- name: Set the npm registry + shell: "npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'" + args: + creates: "{{ prospectus_code_dir }}/.npmrc" + environment: "{{ prospectus_env_vars }}" + tags: + - install + - install:app-requirements + +# Set the npm registry permissions +- name: Set the npm registry permissions + file: + path: "{{ prospectus_code_dir }}/.npmrc" + owner: "{{ prospectus_user }}" + group: "{{ prospectus_user }}" + tags: + - install + - install:app-requirements + +# Install with the shell command instead of the ansible npm module so we don't accidentally re-write package.json +- name: install node dependencies + shell: "{{ prospectus_nodeenv_bin }}/npm install --unsafe-perm=true --allow-root" + args: + chdir: "{{ prospectus_code_dir }}" + environment: "{{ prospectus_env_vars }}" + become_user: "{{ prospectus_user }}" + tags: + - install + - install:app-requirements + +# Install with the 
shell command instead of the ansible npm module so we don't accidentally re-write package.json +- name: run prospectus build + shell: "{{ prospectus_nodeenv_bin }}/npm run build" + args: + chdir: "{{ prospectus_code_dir }}" + environment: "{{ prospectus_env_vars }}" + become_user: "{{ prospectus_user }}" + tags: + - install + - install:app-requirements + +- name: Install pngquant + apt: + name: "pngquant" + tags: + - install + - install:system-requirements + +- name: Compress images + shell: "find public -name '*.png' -exec pngquant --skip-if-larger --quality 50-75 --ext .png --force -- {} \\;" + args: + chdir: "{{ prospectus_code_dir }}" + become_user: "{{ prospectus_user }}" + +# Copy over the target from the previous build to where it needs to be +- name: Create data folder + file: + path: "{{ PROSPECTUS_DATA_DIR }}" + state: directory + owner: "{{ prospectus_user }}" + group: "{{ prospectus_user }}" + +- name: Move prospectus public folder to var folder + shell: "mv {{ prospectus_code_dir }}/public/* {{ PROSPECTUS_DATA_DIR }}" diff --git a/playbooks/roles/prospectus/templates/.env.environment.j2 b/playbooks/roles/prospectus/templates/.env.environment.j2 new file mode 100644 index 00000000000..3bb0bddd8ef --- /dev/null +++ b/playbooks/roles/prospectus/templates/.env.environment.j2 @@ -0,0 +1,11 @@ +# This file is created and updated by ansible + +OAUTH_ID={{ PROSPECTUS_OAUTH_ID }} +OAUTH_SECRET={{ PROSPECTUS_OAUTH_SECRET }} + +NEW_RELIC_LICENSE_KEY={{ PROSPECTUS_NEW_RELIC_LICENSE_KEY }} +NEW_RELIC_APP_ID={{ PROSPECTUS_NEW_RELIC_APP_ID }} + +GATSBY_GREENHOUSE_API_TOKEN={{ PROSPECTUS_GREENHOUSE_API_TOKEN }} +GATSBY_CONTENTFUL_SPACE_ID={{ PROSPECTUS_CONTENTFUL_SPACE_ID }} +GATSBY_CONTENTFUL_ACCESS_TOKEN={{ PROSPECTUS_CONTENTFUL_ACCESS_TOKEN }} diff --git a/playbooks/roles/python/tasks/main.yml b/playbooks/roles/python/tasks/main.yml index bdc47dc05ec..490687052e6 100644 --- a/playbooks/roles/python/tasks/main.yml +++ b/playbooks/roles/python/tasks/main.yml @@ -2,6 
+2,10 @@ # xenial+ cloud images don't have python2.7 installed, and ansible doesn't yet # support python3. +- name: Wait until cloud-init has finished running + raw: test -e /usr/bin/cloud-init && cloud-init status --wait + ignore_errors: yes + - name: Update apt-get raw: apt-get update -qq register: python_update_result @@ -12,3 +16,5 @@ - name: Install packages raw: "apt-get install -qq {{ item }}" with_items: "{{ python_packages }}" + retries: 10 + delay: 10 diff --git a/playbooks/roles/rabbitmq/tasks/main.yml b/playbooks/roles/rabbitmq/tasks/main.yml index b8c934b5407..575bb133dca 100644 --- a/playbooks/roles/rabbitmq/tasks/main.yml +++ b/playbooks/roles/rabbitmq/tasks/main.yml @@ -5,12 +5,11 @@ # - name: install packages needed by rabbit apt: - name: "{{ item }}" + name: "{{ rabbitmq_debian_pkgs }}" state: present install_recommends: yes force: yes update_cache: yes - with_items: "{{ rabbitmq_debian_pkgs }}" tags: - install - install:app-requirements @@ -263,7 +262,7 @@ permissions: "{{ permissions|default([])+[{'vhost':item,'configure_priv':'.*','read_priv':'.*','write_priv':'.*'}] }}" with_items: - "{{ RABBITMQ_VHOSTS }}" - tags: + tags: - users - maintenance - "manage" diff --git a/playbooks/roles/rbenv/defaults/main.yml b/playbooks/roles/rbenv/defaults/main.yml index 1dced1fa45f..0011dcc5100 100644 --- a/playbooks/roles/rbenv/defaults/main.yml +++ b/playbooks/roles/rbenv/defaults/main.yml @@ -1,11 +1,16 @@ --- -rbenv_version: 'v1.0.0' -rbenv_bundler_version: '1.11.2' -rbenv_rake_version: '10.4.2' +RBENV_VERSION: 'v1.0.0' +rbenv_version: "{{ RBENV_VERSION }}" +RBENV_BUNDLER_VERSION: '1.11.2' +rbenv_bundler_version: "{{ RBENV_BUNDLER_VERSION }}" +RBENV_RAKE_VERSION: '10.4.2' +rbenv_rake_version: "{{ RBENV_RAKE_VERSION }}" rbenv_root: "{{ rbenv_dir }}/.rbenv" rbenv_gem_root: "{{ rbenv_dir }}/.gem" rbenv_gem_bin: "{{ rbenv_gem_root }}/bin" +RBENV_RUBYGEMS_VERSION: '2.7.8' +rbenv_rubygems_version: "{{ RBENV_RUBYGEMS_VERSION }}" rbenv_bin: "{{ rbenv_dir 
}}/.rbenv/bin" rbenv_shims: "{{ rbenv_root }}/shims" rbenv_path: "{{ rbenv_bin }}:{{ rbenv_shims }}:{{ rbenv_gem_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" diff --git a/playbooks/roles/rbenv/tasks/main.yml b/playbooks/roles/rbenv/tasks/main.yml index 1da4548b1d7..7474aac7fc0 100644 --- a/playbooks/roles/rbenv/tasks/main.yml +++ b/playbooks/roles/rbenv/tasks/main.yml @@ -94,7 +94,7 @@ - name: if ruby-build exists, which versions we can install command: /usr/local/bin/ruby-build --definitions - when: rbuild_present|success + when: rbuild_present is succeeded register: installable_ruby_vers ignore_errors: yes tags: @@ -106,7 +106,7 @@ command: mktemp -d register: tempdir become_user: "{{ rbenv_user }}" - when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) + when: rbuild_present is failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) tags: - install - install:base @@ -116,7 +116,7 @@ repo: https://github.com/sstephenson/ruby-build.git dest: "{{ tempdir.stdout }}/ruby-build" accept_hostkey: yes - when: tempdir.stdout is defined and (rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)) + when: tempdir.stdout is defined and (rbuild_present is failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)) become_user: "{{ rbenv_user }}" tags: - install @@ -124,14 +124,14 @@ - name: install ruby-build command: ./install.sh chdir={{ tempdir.stdout }}/ruby-build - when: tempdir.stdout is defined and (rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)) + when: tempdir.stdout is defined and (rbuild_present is failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)) tags: - install - install:base - name: remove temporary directory file: path={{ 
tempdir.stdout }} state=absent - when: tempdir.stdout is defined and (rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)) + when: tempdir.stdout is defined and (rbuild_present is failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)) tags: - install - install:base @@ -148,7 +148,7 @@ - name: install ruby {{ rbenv_ruby_version }} shell: "rbenv install {{ rbenv_ruby_version }} creates={{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}" - when: ruby_installed|failed + when: ruby_installed is failed become_user: "{{ rbenv_user }}" environment: "{{ rbenv_environment }}" tags: @@ -163,6 +163,14 @@ - install - install:base +- name: update rubygems + shell: "gem update --system {{ rbenv_rubygems_version }}" + become_user: "{{ rbenv_user }}" + environment: "{{ rbenv_environment }}" + tags: + - install + - install:base + - name: install bundler shell: "gem install bundler -v {{ rbenv_bundler_version }}" become_user: "{{ rbenv_user }}" @@ -176,7 +184,7 @@ when: jenkins_worker is not defined or not jenkins_worker tags: - install - - install:base + - install:base - name: install rake gem shell: "gem install rake -v {{ rbenv_rake_version }}" @@ -187,17 +195,6 @@ - install - install:base -# workaround rubygems update bug by forcing the version -# see: https://github.com/rubygems/rubygems/issues/2534 -# and: https://github.com/colszowka/simplecov/commit/ba423820d1d1164fcc038e7b6163b8a96d1786be -- name: update rubygems - shell: "gem install rubygems-update -v '<3' && update_rubygems" - become_user: "{{ rbenv_user }}" - environment: "{{ rbenv_environment }}" - tags: - - install - - install:base - - name: rehash shell: "rbenv rehash" become_user: "{{ rbenv_user }}" diff --git a/playbooks/roles/redis/tasks/main.yml b/playbooks/roles/redis/tasks/main.yml index 52ea81914ac..9f454cd1c48 100644 --- a/playbooks/roles/redis/tasks/main.yml +++ b/playbooks/roles/redis/tasks/main.yml 
@@ -10,24 +10,23 @@ # # # Tasks for role redis -# +# # Overview: -# +# # # Dependencies: # -# +# # Example play: # # - name: Install redis system packages apt: - name: "{{ item }}" + name: "{{ redis_debian_pkgs }}" install_recommends: yes state: present update_cache: yes - with_items: "{{ redis_debian_pkgs }}" notify: - reload redis diff --git a/playbooks/roles/registrar/defaults/main.yml b/playbooks/roles/registrar/defaults/main.yml new file mode 100644 index 00000000000..7667dab4807 --- /dev/null +++ b/playbooks/roles/registrar/defaults/main.yml @@ -0,0 +1,154 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# Defaults for role registrar +# + +REGISTRAR_ENABLED: True + +# +# vars are namespace with the module name. +# +registrar_service_name: 'registrar' + +registrar_user: "{{ registrar_service_name }}" +registrar_home: "{{ COMMON_APP_DIR }}/{{ registrar_service_name }}" +registrar_app_dir: "{{ COMMON_APP_DIR }}/{{ registrar_service_name }}" +registrar_code_dir: "{{ registrar_app_dir }}/{{ registrar_service_name }}" +registrar_venvs_dir: "{{ registrar_app_dir }}/venvs" +registrar_venv_dir: "{{ registrar_venvs_dir }}/registrar" +registrar_celery_default_queue: 'registrar.default' + +REGISTRAR_CELERY_ALWAYS_EAGER: false +REGISTRAR_CELERY_BROKER_TRANSPORT: '' +REGISTRAR_CELERY_BROKER_USER: '' +REGISTRAR_CELERY_BROKER_PASSWORD: '' +REGISTRAR_CELERY_BROKER_HOSTNAME: '' +REGISTRAR_CELERY_BROKER_VHOST: '' + +registrar_environment: + REGISTRAR_CFG: '{{ COMMON_CFG_DIR }}/{{ registrar_service_name }}.yml' + +registrar_gunicorn_port: 8734 + +registrar_debian_pkgs: [] + +REGISTRAR_NGINX_PORT: '1{{ registrar_gunicorn_port }}' +REGISTRAR_SSL_NGINX_PORT: '4{{ registrar_gunicorn_port }}' + +REGISTRAR_DEFAULT_DB_NAME: 
'registrar' +REGISTRAR_MYSQL_HOST: 'localhost' +# MySQL usernames are limited to 16 characters +REGISTRAR_MYSQL_USER: 'registrar001' +REGISTRAR_MYSQL_PASSWORD: 'password' +REGISTRAR_MYSQL_CONN_MAX_AGE: 60 + +REGISTRAR_MEMCACHE: [ 'memcache' ] + +REGISTRAR_DJANGO_SETTINGS_MODULE: 'registrar.settings.production' +REGISTRAR_DOMAIN: 'localhost' +REGISTRAR_URL_ROOT: 'http://{{ REGISTRAR_DOMAIN }}:{{ REGISTRAR_NGINX_PORT }}' +REGISTRAR_API_ROOT: '{{ REGISTRAR_URL_ROOT }}/api' +REGISTRAR_LOGOUT_URL: '{{ REGISTRAR_URL_ROOT }}/logout/' + +REGISTRAR_LANG: 'en_US.UTF-8' +REGISTRAR_LANGUAGE_CODE: 'en' +REGISTRAR_LANGUAGE_COOKIE_NAME: 'openedx-language-preference' + +REGISTRAR_SERVICE_USER: 'registrar_service_user' + +REGISTRAR_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ registrar_service_name }}' +REGISTRAR_MEDIA_ROOT: '{{ REGISTRAR_DATA_DIR }}/media' +REGISTRAR_MEDIA_URL: '/api/media/' + +REGISTRAR_MEDIA_STORAGE_BACKEND: + DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage' + MEDIA_ROOT: '{{ REGISTRAR_MEDIA_ROOT }}' + MEDIA_URL: '{{ REGISTRAR_MEDIA_URL }}' + +# TODO: Let edx_django_service manage REGISTRAR_STATIC_ROOT in phase 2. 
+REGISTRAR_STATIC_ROOT: '{{ REGISTRAR_DATA_DIR }}/staticfiles' +REGISTRAR_STATIC_URL: '/static/' + +REGISTRAR_STATICFILES_STORAGE: 'django.contrib.staticfiles.storage.StaticFilesStorage' + +REGISTRAR_CORS_ORIGIN_ALLOW_ALL: false +REGISTRAR_CORS_ORIGIN_WHITELIST: [] + +REGISTRAR_CSRF_COOKIE_SECURE: false +REGISTRAR_CSRF_TRUSTED_ORIGINS: [] + +REGISTRAR_VERSION: 'master' + +REGISTRAR_GUNICORN_EXTRA: '' + +REGISTRAR_EXTRA_APPS: [] + +REGISTRAR_SESSION_EXPIRE_AT_BROWSER_CLOSE: false + +REGISTRAR_CERTIFICATE_LANGUAGES: + 'en': 'English' + 'es_419': 'Spanish' + +# Used to automatically configure OAuth2 Client +REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_KEY: 'registrar-sso-key' +REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_SECRET: 'registrar-sso-secret' +REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_KEY: 'registrar-backend-service-key' +REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_SECRET: 'registrar-backend-service-secret' +REGISTRAR_SOCIAL_AUTH_REDIRECT_IS_HTTPS: false + +# API key for segment.io +REGISTRAR_SEGMENT_KEY: !!null + +REGISTRAR_DISCOVERY_BASE_URL: !!null +REGISTRAR_LMS_BASE_URL: !!null + +registrar_service_config_overrides: + CERTIFICATE_LANGUAGES: '{{ REGISTRAR_CERTIFICATE_LANGUAGES }}' + REGISTRAR_SERVICE_USER: '{{ REGISTRAR_SERVICE_USER }}' + LANGUAGE_COOKIE_NAME: '{{ REGISTRAR_LANGUAGE_COOKIE_NAME }}' + SEGMENT_KEY: "{{ REGISTRAR_SEGMENT_KEY }}" + DISCOVERY_BASE_URL: "{{ REGISTRAR_DISCOVERY_BASE_URL }}" + LMS_BASE_URL: "{{ REGISTRAR_LMS_BASE_URL }}" + CORS_ORIGIN_WHITELIST: "{{ REGISTRAR_CORS_ORIGIN_WHITELIST }}" + CSRF_TRUSTED_ORIGINS: "{{ REGISTRAR_CSRF_TRUSTED_ORIGINS }}" + CSRF_COOKIE_SECURE: "{{ REGISTRAR_CSRF_COOKIE_SECURE }}" + CELERY_ALWAYS_EAGER: '{{ REGISTRAR_CELERY_ALWAYS_EAGER }}' + CELERY_BROKER_TRANSPORT: '{{ REGISTRAR_CELERY_BROKER_TRANSPORT }}' + CELERY_BROKER_USER: '{{ REGISTRAR_CELERY_BROKER_USER }}' + CELERY_BROKER_PASSWORD: '{{ REGISTRAR_CELERY_BROKER_PASSWORD }}' + CELERY_BROKER_HOSTNAME: '{{ REGISTRAR_CELERY_BROKER_HOSTNAME }}' + CELERY_BROKER_VHOST: '{{ 
REGISTRAR_CELERY_BROKER_VHOST }}' + CELERY_DEFAULT_EXCHANGE: 'registrar' + CELERY_DEFAULT_ROUTING_KEY: 'registrar' + CELERY_DEFAULT_QUEUE: '{{ registrar_celery_default_queue }}' + +# See edx_django_service_automated_users for an example of what this should be +REGISTRAR_AUTOMATED_USERS: {} + +# NOTE: These variables are only needed to create the demo site (e.g. for sandboxes) + +REGISTRAR_ENABLE_NEWRELIC_DISTRIBUTED_TRACING: false + +# Remote config +REGISTRAR_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + +REGISTRAR_DECRYPT_CONFIG_ENABLED: "{{ COMMON_DECRYPT_CONFIG_ENABLED }}" +REGISTRAR_COPY_CONFIG_ENABLED: "{{ COMMON_COPY_CONFIG_ENABLED }}" + +# Worker settings +worker_django_settings_module: "{{ REGISTRAR_DJANGO_SETTINGS_MODULE }}" +REGISTRAR_CELERY_WORKERS: + - queue: '{{ registrar_celery_default_queue }}' + concurrency: 1 + monitor: True +registrar_workers: "{{ REGISTRAR_CELERY_WORKERS }}" + +registrar_post_migrate_commands: [] diff --git a/playbooks/roles/registrar/meta/main.yml b/playbooks/roles/registrar/meta/main.yml new file mode 100644 index 00000000000..5550e6730fc --- /dev/null +++ b/playbooks/roles/registrar/meta/main.yml @@ -0,0 +1,53 @@ +--- +# +# edX Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +## +# Role includes for role registrar +# +dependencies: + - role: edx_django_service + edx_django_service_version: '{{ REGISTRAR_VERSION }}' + edx_django_service_name: '{{ registrar_service_name }}' + edx_django_service_home: '{{ COMMON_APP_DIR }}/{{ registrar_service_name }}' + edx_django_service_user: '{{ registrar_user }}' + edx_django_service_config_overrides: '{{ registrar_service_config_overrides }}' + edx_django_service_debian_pkgs_extra: '{{ registrar_debian_pkgs }}' + edx_django_service_gunicorn_port: 
'{{ registrar_gunicorn_port }}' + edx_django_service_django_settings_module: '{{ REGISTRAR_DJANGO_SETTINGS_MODULE }}' + edx_django_service_environment_extra: '{{ registrar_environment }}' + edx_django_service_gunicorn_extra: '{{ REGISTRAR_GUNICORN_EXTRA }}' + edx_django_service_nginx_port: '{{ REGISTRAR_NGINX_PORT }}' + edx_django_service_ssl_nginx_port: '{{ REGISTRAR_SSL_NGINX_PORT }}' + edx_django_service_language_code: '{{ REGISTRAR_LANGUAGE_CODE }}' + edx_django_service_secret_key: '{{ REGISTRAR_SECRET_KEY }}' + edx_django_service_media_storage_backend: '{{ REGISTRAR_MEDIA_STORAGE_BACKEND }}' + edx_django_service_staticfiles_storage: '{{ REGISTRAR_STATICFILES_STORAGE }}' + edx_django_service_memcache: '{{ REGISTRAR_MEMCACHE }}' + edx_django_service_default_db_host: '{{ REGISTRAR_MYSQL_HOST }}' + edx_django_service_default_db_name: '{{ REGISTRAR_DEFAULT_DB_NAME }}' + edx_django_service_default_db_atomic_requests: false + edx_django_service_db_user: '{{ REGISTRAR_MYSQL_USER }}' + edx_django_service_db_password: '{{ REGISTRAR_MYSQL_PASSWORD }}' + edx_django_service_default_db_conn_max_age: '{{ REGISTRAR_MYSQL_CONN_MAX_AGE }}' + edx_django_service_extra_apps: '{{ REGISTRAR_EXTRA_APPS }}' + edx_django_service_session_expire_at_browser_close: '{{ REGISTRAR_SESSION_EXPIRE_AT_BROWSER_CLOSE }}' + edx_django_service_social_auth_edx_oauth2_key: '{{ REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_KEY }}' + edx_django_service_social_auth_edx_oauth2_secret: '{{ REGISTRAR_SOCIAL_AUTH_EDX_OAUTH2_SECRET }}' + edx_django_service_backend_service_edx_oauth2_key: '{{ REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_KEY }}' + edx_django_service_backend_service_edx_oauth2_secret: '{{ REGISTRAR_BACKEND_SERVICE_EDX_OAUTH2_SECRET }}' + edx_django_service_automated_users: '{{ REGISTRAR_AUTOMATED_USERS }}' + edx_django_service_cors_whitelist: '{{ REGISTRAR_CORS_ORIGIN_WHITELIST }}' + edx_django_service_post_migrate_commands: '{{ registrar_post_migrate_commands }}' + 
edx_django_service_enable_newrelic_distributed_tracing: '{{ REGISTRAR_ENABLE_NEWRELIC_DISTRIBUTED_TRACING }}' + edx_django_service_api_root: '{{ REGISTRAR_API_ROOT }}' + edx_django_service_decrypt_config_enabled: '{{ REGISTRAR_DECRYPT_CONFIG_ENABLED }}' + edx_django_service_copy_config_enabled: '{{ REGISTRAR_COPY_CONFIG_ENABLED }}' + edx_django_service_migration_check_services: '{{ registrar_service_name }},{{ registrar_service_name }}-workers' + edx_django_service_enable_celery_workers: true + edx_django_service_workers: '{{ registrar_workers }}' diff --git a/playbooks/roles/docker/defaults/main.yml b/playbooks/roles/registrar/tasks/main.yml similarity index 65% rename from playbooks/roles/docker/defaults/main.yml rename to playbooks/roles/registrar/tasks/main.yml index 2e67fd38161..6ae3b656217 100644 --- a/playbooks/roles/docker/defaults/main.yml +++ b/playbooks/roles/registrar/tasks/main.yml @@ -7,19 +7,17 @@ # code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions # license: https://github.com/edx/configuration/blob/master/LICENSE.TXT # -## -# Defaults for role docker -# - # -# vars are namespaced with the module name. # -docker_role_name: docker - +# Tasks for role registrar +# +# Overview: This role's tasks come from edx_django_service. 
+# +# +# Dependencies: +# +# +# Example play: # -# OS packages # -docker_debian_pkgs: [] - -docker_redhat_pkgs: [] diff --git a/playbooks/roles/s3fs/defaults/main.yml b/playbooks/roles/s3fs/defaults/main.yml index bea88bb075b..cb43dbe7aae 100644 --- a/playbooks/roles/s3fs/defaults/main.yml +++ b/playbooks/roles/s3fs/defaults/main.yml @@ -11,7 +11,8 @@ # Vars for role s3fs # -s3fs_version: 's3fs-1.73' +S3FS_VERSION: 's3fs-1.73' +s3fs_version: "{{ S3FS_VERSION }}" s3fs_download_src: 'http://s3fs.googlecode.com/files/' s3fs_archive: '{{ s3fs_version }}.tar.gz' s3fs_download_url: '{{ s3fs_download_src }}/{{ s3fs_archive }}' diff --git a/playbooks/roles/security/defaults/main.yml b/playbooks/roles/security/defaults/main.yml index ef01b14e6d1..4699b792cec 100644 --- a/playbooks/roles/security/defaults/main.yml +++ b/playbooks/roles/security/defaults/main.yml @@ -37,3 +37,7 @@ security_debian_pkgs: security_redhat_pkgs: - yum-plugin-security - yum-cron + + +SECURITY_DEBIAN_PKGS_BLACKLIST: [] + diff --git a/playbooks/roles/security/tasks/security-amazon.yml b/playbooks/roles/security/tasks/security-amazon.yml index 2d0ece1d5d9..6085d38061e 100644 --- a/playbooks/roles/security/tasks/security-amazon.yml +++ b/playbooks/roles/security/tasks/security-amazon.yml @@ -2,10 +2,9 @@ #### Enable periodic security updates - name: Install security packages yum: - name: "{{ item }}" + name: "{{ security_redhat_pkgs }}" state: latest update_cache: yes - with_items: "{{ security_redhat_pkgs }}" - name: Enable automatic start for update service service: diff --git a/playbooks/roles/security/tasks/security-ubuntu.yml b/playbooks/roles/security/tasks/security-ubuntu.yml index 9c05e8cb4be..8dc273dc902 100644 --- a/playbooks/roles/security/tasks/security-ubuntu.yml +++ b/playbooks/roles/security/tasks/security-ubuntu.yml @@ -2,10 +2,9 @@ #### Enable periodic security updates - name: Install security packages apt: - name: "{{ item }}" + name: "{{ security_debian_pkgs }}" state: latest 
update_cache: yes - with_items: "{{ security_debian_pkgs }}" - name: Update all system packages @@ -46,6 +45,14 @@ mode: "0644" when: SECURITY_UNATTENDED_UPGRADES and not SECURITY_UPDATE_ALL_PACKAGES +- name: Add debian blacklist + template: + src: "etc/apt/apt.conf.d/50unattended-upgrades" + dest: "/etc/apt/apt.conf.d/50unattended-upgrades" + owner: root + group: root + mode: "0644" + - name: Disable security only updates on unattended-upgrades file: path: "/etc/apt/apt.conf.d/20unattended-upgrade" diff --git a/playbooks/roles/security/templates/etc/apt/apt.conf.d/50unattended-upgrades b/playbooks/roles/security/templates/etc/apt/apt.conf.d/50unattended-upgrades new file mode 100644 index 00000000000..a099aa23619 --- /dev/null +++ b/playbooks/roles/security/templates/etc/apt/apt.conf.d/50unattended-upgrades @@ -0,0 +1,5 @@ +Unattended-Upgrade::Package-Blacklist{ + {% for blacklisted_item in SECURITY_DEBIAN_PKGS_BLACKLIST %} + {{ blacklisted_item }}; + {% endfor %} +} \ No newline at end of file diff --git a/playbooks/roles/server_utils/tasks/main.yml b/playbooks/roles/server_utils/tasks/main.yml index 73f28b0a862..5b8d81bf848 100644 --- a/playbooks/roles/server_utils/tasks/main.yml +++ b/playbooks/roles/server_utils/tasks/main.yml @@ -10,13 +10,13 @@ # # # Tasks for role server_utils -# +# # Overview: # # Install useful interactive utilities for triange and debugging purposes. # Typically these would not need to be available on servers as shell access # is uneccessary. 
-# +# # Example play: # # @@ -32,9 +32,8 @@ - name: Install ubuntu system packages apt: - name: "{{ item }}" + name: "{{ server_utils_debian_pkgs }}" install_recommends: yes - state: present + state: present update_cache: yes - with_items: "{{ server_utils_debian_pkgs }}" when: ansible_distribution in common_debian_variants diff --git a/playbooks/roles/simple_theme/tasks/deploy.yml b/playbooks/roles/simple_theme/tasks/deploy.yml index dd66e01de32..85cfb4ec4b8 100644 --- a/playbooks/roles/simple_theme/tasks/deploy.yml +++ b/playbooks/roles/simple_theme/tasks/deploy.yml @@ -90,6 +90,7 @@ group: "{{ common_web_group }}" with_items: # List of files from ./templates to be processed + - "lms/static/sass/common-variables.scss" - "lms/static/sass/partials/lms/theme/_variables-v1.scss" - "lms/static/sass/_lms-overrides.scss" diff --git a/playbooks/roles/simple_theme/templates/lms/static/sass/_lms-overrides.scss.j2 b/playbooks/roles/simple_theme/templates/lms/static/sass/_lms-overrides.scss.j2 index c075f702630..3a3a4905f87 100644 --- a/playbooks/roles/simple_theme/templates/lms/static/sass/_lms-overrides.scss.j2 +++ b/playbooks/roles/simple_theme/templates/lms/static/sass/_lms-overrides.scss.j2 @@ -1,4 +1,4 @@ +@import 'common-variables'; /* Extra SASS as defined by simple_theme starts here: */ {{ SIMPLETHEME_EXTRA_SASS }} /* Extra SASS as defined by simple_theme ends here. 
*/ - diff --git a/playbooks/roles/simple_theme/templates/lms/static/sass/common-variables.scss.j2 b/playbooks/roles/simple_theme/templates/lms/static/sass/common-variables.scss.j2 new file mode 100644 index 00000000000..1b1c579884c --- /dev/null +++ b/playbooks/roles/simple_theme/templates/lms/static/sass/common-variables.scss.j2 @@ -0,0 +1,5 @@ +/* Variables from simple_theme role start here */ +{% for item in SIMPLETHEME_SASS_OVERRIDES %} +${{ item.variable }}: {{ item.value }}; +{% endfor %} +/* Variables from simple_theme role end here */ diff --git a/playbooks/roles/simple_theme/templates/lms/static/sass/partials/lms/theme/_variables-v1.scss.j2 b/playbooks/roles/simple_theme/templates/lms/static/sass/partials/lms/theme/_variables-v1.scss.j2 index c65524605c8..5bdd4d5aaf3 100644 --- a/playbooks/roles/simple_theme/templates/lms/static/sass/partials/lms/theme/_variables-v1.scss.j2 +++ b/playbooks/roles/simple_theme/templates/lms/static/sass/partials/lms/theme/_variables-v1.scss.j2 @@ -1,8 +1,2 @@ -/* Variables from simple_theme role start here */ -{% for item in SIMPLETHEME_SASS_OVERRIDES %} -${{ item.variable }}: {{ item.value }}; -{% endfor %} -/* Variables from simple_theme role end here */ - -@import 'lms/static/sass/partials/lms/theme/variables-v1'; - +@import '../common-variables'; +@import 'lms/static/sass/partials/lms/theme/variables-v1'; diff --git a/playbooks/roles/splunk-server/defaults/main.yml b/playbooks/roles/splunk-server/defaults/main.yml index 8a3a6edd292..af2ef2acd48 100644 --- a/playbooks/roles/splunk-server/defaults/main.yml +++ b/playbooks/roles/splunk-server/defaults/main.yml @@ -106,6 +106,12 @@ SPLUNK_FROM_ADDRESS: no-reply@example.com SPLUNK_EMAIL_FOOTER: Generated by {{ SPLUNK_HOSTNAME }} SPLUNK_SSL_HOSTNAME: splunk.example.com:443 +# Enable this if you want to terminate TLS elsewhere +SPLUNK_PROXY_ENABLED: false +SPLUNK_PROXY_SCHEME: "https" +SPLUNK_PROXY_HOSTNAME: "{{ SPLUNK_HOSTNAME }}" +SPLUNK_PROXY_URL: "{{ SPLUNK_PROXY_SCHEME 
}}://{{ SPLUNK_PROXY_HOSTNAME }}" + # SSL settings. Either all or none of these must be defined. # For more details about setting up splunk with SSL, see # https://openedx.atlassian.net/wiki/display/EdxOps/viewpage.action?pageId=40174184 diff --git a/playbooks/roles/splunk-server/meta/main.yml b/playbooks/roles/splunk-server/meta/main.yml index 7a819c756ab..86943d37869 100644 --- a/playbooks/roles/splunk-server/meta/main.yml +++ b/playbooks/roles/splunk-server/meta/main.yml @@ -14,5 +14,7 @@ dependencies: user_info: "{{ COMMON_USER_INFO }}" - role: security when: COMMON_SECURITY_UPDATES + - role: newrelic_infrastructure + when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE - role: mount_ebs volumes: "{{ SPLUNK_VOLUMES }}" diff --git a/playbooks/roles/splunk-server/tasks/main.yml b/playbooks/roles/splunk-server/tasks/main.yml index 065ae9d86e6..1656aa72517 100644 --- a/playbooks/roles/splunk-server/tasks/main.yml +++ b/playbooks/roles/splunk-server/tasks/main.yml @@ -57,6 +57,40 @@ group: splunk mode: "0600" +- name: Create web configuration + template: + src: opt/splunk/etc/system/local/web.conf.j2 + dest: "{{ splunk_home }}/etc/system/local/web.conf" + owner: splunk + group: splunk + mode: "0600" + tags: + - "install" + - "install:configuration" + - "install:configuration:web" + +- name: Create props configuation + template: + src: opt/splunk/etc/system/local/props.conf.j2 + dest: "{{ splunk_home }}/etc/system/local/props.conf" + owner: splunk + group: splunk + mode: "0600" + tags: + - "install" + - "install:configuration" + +- name: Create transforms configuation + template: + src: opt/splunk/etc/system/local/transforms.conf.j2 + dest: "{{ splunk_home }}/etc/system/local/transforms.conf" + owner: splunk + group: splunk + mode: "0600" + tags: + - "install" + - "install:configuration" + - name: Create bucket directories file: path: "{{ item }}" diff --git a/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/props.conf.j2 
b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/props.conf.j2 new file mode 100644 index 00000000000..ac63d159202 --- /dev/null +++ b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/props.conf.j2 @@ -0,0 +1,49 @@ +# Version 6.2.1 +# DO NOT EDIT THIS FILE! +# Changes to default files will be lost on update and are difficult to +# manage and support. +# +# Please make any changes to system defaults by overriding them in +# apps or $SPLUNK_HOME/etc/system/local +# (See "Configuration file precedence" in the web documentation). +# +# To override a specific setting, copy the name of the stanza and +# setting to the file where you wish to override it. +# +# This file contains possible attribute/value pairs for configuring +# Splunk's processing properties. +# + +[junit] +KV_MODE = xml +TRUNCATE = 100000 +BREAK_ONLY_BEFORE = <\?xml version= +MAX_EVENTS = 10000 +SHOULD_LINEMERGE = False +LINE_BREAKER = (?s)(\s+).+?[^<][^\/][^c][^a][^s][^e][^>] + +[build_result] +KV_MODE = xml +BREAK_ONLY_BEFORE = <\?xml version= +SHOULD_LINEMERGE = True +MAX_EVENTS = 10000 + +[sitespeed_result] +SHOULD_LINEMERGE = true +INDEXED_EXTRACTIONS = json +NO_BINARY_CHECK = true +KV_MODE = none +TRUNCATE = 0 + +[jenkins_build] +BREAK_ONLY_BEFORE = +DATETIME_CONFIG = +NO_BINARY_CHECK = true +category = Application +description = Jenkins build logs +disabled = false +maxDist = 75 +pulldown_type = 1 + +[syslog] +TRANSFORMS-anonymize = ansible-output diff --git a/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/transforms.conf.j2 b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/transforms.conf.j2 new file mode 100644 index 00000000000..76d0a4847f0 --- /dev/null +++ b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/transforms.conf.j2 @@ -0,0 +1,6 @@ +[ansible-output] +REGEX = ansible-(.*) +# We want to anonymize Ansible command output in log events as it is +# logging passwords in Splunk index (See 
https://openedx.atlassian.net/browse/OPS-3241). +FORMAT = ansible-output: Anonymized for security.######################### +DEST_KEY = _raw diff --git a/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/web.conf.j2 b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/web.conf.j2 new file mode 100644 index 00000000000..cb3cb05d983 --- /dev/null +++ b/playbooks/roles/splunk-server/templates/opt/splunk/etc/system/local/web.conf.j2 @@ -0,0 +1,15 @@ +[settings] +enableSplunkWebSSL = 0 +{% if SPLUNK_PROXY_ENABLED %} +tools.proxy.on = true +tools.proxy.base = {{ SPLUNK_PROXY_URL }} + +# If set to "0", prevents the application server from +# being run from splunkd. Instead, Splunk Web starts as +# a separate python-based service which directly listens to the +# httpport. This is how Splunk 6.1.X and earlier behaved. + +# If you dont set this - putting splunk behind a proxy that terminates TLS will not work, as it will complain about strict SSO Mode being set +# even if you set SSOMode = permissive +appServerPorts = 0 +{% endif %} diff --git a/playbooks/roles/splunkforwarder/handlers/main.yml b/playbooks/roles/splunkforwarder/handlers/main.yml index d6da5f92fc7..07c62e9634d 100644 --- a/playbooks/roles/splunkforwarder/handlers/main.yml +++ b/playbooks/roles/splunkforwarder/handlers/main.yml @@ -18,5 +18,5 @@ # Restart Splunk - name: restart splunkforwarder service: - name: splunk + name: SplunkForwarder state: restarted diff --git a/playbooks/roles/splunkforwarder/tasks/main.yml b/playbooks/roles/splunkforwarder/tasks/main.yml index 7ec85aaaf0d..8c646292d3f 100644 --- a/playbooks/roles/splunkforwarder/tasks/main.yml +++ b/playbooks/roles/splunkforwarder/tasks/main.yml @@ -24,9 +24,8 @@ # Install Splunk Forwarder for common_debian_variants - name: Install splunkforwarder specific system packages apt: - name: "{{ item }}" + name: "{{ splunk_debian_pkgs }}" state: present - with_items: "{{ splunk_debian_pkgs }}" tags: - splunk - install @@ 
-38,7 +37,7 @@ dest: "/tmp/{{ SPLUNKFORWARDER_DEB }}" url: "{{ SPLUNKFORWARDER_PACKAGE_URL }}" register: download_deb - until: download_deb|succeeded + until: download_deb is succeeded retries: 5 when: ansible_distribution in common_debian_variants @@ -52,7 +51,7 @@ dest: "/tmp/{{ SPLUNKFORWARDER_RPM }}" url: "{{ SPLUNKFORWARDER_PACKAGE_URL }}" register: download_rpm - until: download_rpm|succeeded + until: download_rpm is succeeded retries: 5 when: ansible_distribution in common_redhat_variants @@ -69,29 +68,32 @@ groups: syslog,adm when: download_rpm.changed or download_deb.changed -# Need to start splunk manually so that it can create various files -# and directories that aren't created till the first run and are needed -# to run some of the below commands. -- name: Start splunk manually - shell: "{{ splunkforwarder_output_dir }}/bin/splunk start --accept-license --answer-yes --no-prompt" - args: - creates: "{{ splunkforwarder_output_dir }}/var/lib/splunk" - when: download_rpm.changed or download_deb.changed - register: started_manually - -- name: Stop splunk manually - shell: "{{ splunkforwarder_output_dir }}/bin/splunk stop --accept-license --answer-yes --no-prompt" - when: (download_rpm.changed or download_deb.changed) and started_manually.changed - - name: Create boot script - shell: "{{ splunkforwarder_output_dir }}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt" + shell: "{{ splunkforwarder_output_dir }}/bin/splunk enable boot-start -systemd-unit-file-name SplunkForwarder.service -user splunk --accept-license --answer-yes --no-prompt" args: - creates: /etc/init.d/splunk + creates: /etc/systemd/system/SplunkForwarder.service register: create_boot_script when: download_rpm.changed or download_deb.changed + +- name: Remove old boot script + file: + path: /etc/init.d/splunk + state: absent + when: (download_rpm.changed or download_deb.changed) and create_boot_script.changed + +- name: Accept the Splunk license automatically 
when starting + lineinfile: + dest: /etc/systemd/system/SplunkForwarder.service + regexp: '^ExecStart=' + line: 'ExecStart=/opt/splunkforwarder/bin/splunk _internal_launch_under_systemd --accept-license --no-prompt --answer-yes' + when: (download_rpm.changed or download_deb.changed) and create_boot_script.changed + +- name: reload systemd configuration + command: systemctl daemon-reload + when: (download_rpm.changed or download_deb.changed) and create_boot_script.changed notify: restart splunkforwarder -- name: Ensure splunk forder permissions +- name: Ensure splunk forwarder permissions file: path: "{{ splunkforwarder_output_dir }}" state: directory @@ -101,21 +103,6 @@ when: download_rpm.changed or download_deb.changed notify: restart splunkforwarder -- name: Update admin pasword - shell: "{{ splunkforwarder_output_dir }}/bin/splunk edit user admin -password '{{ SPLUNKFORWARDER_PASSWORD }}' -auth admin:changeme --accept-license --answer-yes --no-prompt" - when: download_rpm.changed or download_deb.changed - notify: restart splunkforwarder - -- name: Add chkconfig to init script - lineinfile: - dest: /etc/init.d/splunk - regexp: "^# chkconfig: 235 98 55" - line: "# chkconfig: 235 98 55" - insertafter: "#!/bin/sh" - state: present - when: (download_rpm.changed or download_deb.changed) and create_boot_script.changed - notify: restart splunkforwarder - - name: Make sure necessary dirs exist file: path: "{{ item }}" @@ -146,7 +133,7 @@ when: item.ssl_cert is defined with_items: "{{ SPLUNKFORWARDER_SERVERS }}" -- name: Create inputs and outputs configuration +- name: Create inputs, outputs and server configuration template: src: "opt/splunkforwarder/etc/system/local/{{ item }}.conf.j2" dest: "/opt/splunkforwarder/etc/system/local/{{ item }}.conf" @@ -156,4 +143,5 @@ with_items: - inputs - outputs + - server notify: restart splunkforwarder diff --git a/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/inputs.conf.j2
b/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/inputs.conf.j2 index 1fc90303bcd..e42b7d277c0 100644 --- a/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/inputs.conf.j2 +++ b/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/inputs.conf.j2 @@ -31,4 +31,7 @@ followSymlink = {{ loggable.followSymlink }} {% if loggable.crcSalt is defined %} crcSalt = {{ loggable.crcSalt }} {% endif %} +{% if loggable.whitelist is defined %} +whitelist = {{ loggable.whitelist }} +{% endif %} {% endfor %} diff --git a/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/server.conf.j2 b/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/server.conf.j2 new file mode 100644 index 00000000000..f4cdbd2bf26 --- /dev/null +++ b/playbooks/roles/splunkforwarder/templates/opt/splunkforwarder/etc/system/local/server.conf.j2 @@ -0,0 +1,2 @@ +[httpServer] +disableDefaultPort = true diff --git a/playbooks/roles/sqlite_fix/defaults/main.yml b/playbooks/roles/sqlite_fix/defaults/main.yml new file mode 100644 index 00000000000..c5f0594f8db --- /dev/null +++ b/playbooks/roles/sqlite_fix/defaults/main.yml @@ -0,0 +1,21 @@ +--- +SQLITE_FIX_PYTHON_PATH: "python" +SQLITE_FIX_TMP_DIR: "/var/tmp/sqlite_fix" + +PYSQLITE_URL: "https://codeload.github.com/ghaering/pysqlite/tar.gz/2.8.3" +PYSQLITE_CREATED_PATH: "pysqlite-2.8.3" +PYSQLITE_TMP_PATH: "{{ SQLITE_FIX_TMP_DIR }}/{{ PYSQLITE_CREATED_PATH }}" + +SQLITE_AUTOCONF_URL: "https://www.sqlite.org/2016/sqlite-autoconf-3140100.tar.gz" +SQLITE_AUTOCONF_CREATED_PATH: "sqlite-autoconf-3140100" +SQLITE_TMP_PATH: "{{ SQLITE_FIX_TMP_DIR }}/{{ SQLITE_AUTOCONF_CREATED_PATH }}" + +sqlite_s3_packages: + - name: libsqlite3-0_3.14.1-1build1_amd64.deb + url: https://s3.amazonaws.com/vagrant.testeng.edx.org/libsqlite3-0_3.14.1-1build1_amd64.deb + - name: libsqlite3-0-dbg_3.14.1-1build1_amd64.deb + url: 
https://s3.amazonaws.com/vagrant.testeng.edx.org/libsqlite3-0-dbg_3.14.1-1build1_amd64.deb + - name: libsqlite3-dev_3.14.1-1build1_amd64.deb + url: https://s3.amazonaws.com/vagrant.testeng.edx.org/libsqlite3-dev_3.14.1-1build1_amd64.deb + - name: libsqlite3-dev-dbgsym_3.14.1-1build1_amd64.ddeb + url: https://s3.amazonaws.com/vagrant.testeng.edx.org/libsqlite3-dev-dbgsym_3.14.1-1build1_amd64.ddeb diff --git a/playbooks/roles/sqlite_fix/tasks/main.yml b/playbooks/roles/sqlite_fix/tasks/main.yml new file mode 100644 index 00000000000..5d2ccd5a3a8 --- /dev/null +++ b/playbooks/roles/sqlite_fix/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: Creates directory + file: + path: "{{ SQLITE_FIX_TMP_DIR }}" + state: directory + mode: 0775 + tags: + - devstack + - devstack:install + - install + + +# Tasks to download and upgrade pysqlite to prevent segfaults when testing in devstack +- name: Download and unzip sqlite autoconf update + unarchive: + src: "{{ SQLITE_AUTOCONF_URL }}" + dest: "{{ SQLITE_FIX_TMP_DIR }}" + remote_src: yes + tags: + - devstack + - devstack:install + - install + +# Python 3 requires a version of sqlite that is not present in the Xenial +# repositories. Download it from an s3 bucket +- name: Remove the distribution libsqlite3-dev package + apt: + name: libsqlite3-dev + state: absent + +- name: Download newer versions of sqlite that are not available in Xenial + get_url: + dest: "/tmp/{{ item.name }}" + url: "{{ item.url }}" + with_items: "{{ sqlite_s3_packages }}" + +- name: Install sqlite packages + shell: gdebi -nq /tmp/{{ item.name }} + with_items: "{{ sqlite_s3_packages }}" diff --git a/playbooks/roles/stop_all_edx_services/handlers/main.yml b/playbooks/roles/stop_all_edx_services/handlers/main.yml index 7f334a5dc9f..61e781e2b5c 100644 --- a/playbooks/roles/stop_all_edx_services/handlers/main.yml +++ b/playbooks/roles/stop_all_edx_services/handlers/main.yml @@ -18,6 +18,10 @@ # an AMI.
# # +- name: supervisorctl_stop_all + shell: /edx/bin/supervisorctl stop all + ignore_errors: True + - name: stop supervisor service: name: supervisor diff --git a/playbooks/roles/stop_all_edx_services/tasks/main.yml b/playbooks/roles/stop_all_edx_services/tasks/main.yml index 814aca31c03..38520a69ebe 100644 --- a/playbooks/roles/stop_all_edx_services/tasks/main.yml +++ b/playbooks/roles/stop_all_edx_services/tasks/main.yml @@ -22,11 +22,51 @@ # - stop_all_edx_services # # -- name: Stop supervisor + +# Hermes is not used everywhere, try to stop it if it is running. +# We ignore errors here because of the various states the process +# and machine could be in and because we do not install Hermes +# on everything. +- name: stop Hermes if it exists + service: + name: "hermes" + state: stopped + ignore_errors: yes + +- name: supervisorctl stop all stat: path: /etc/init/supervisor.conf register: stat_out changed_when: stat_out is defined and stat_out.stat.exists + notify: + - supervisorctl_stop_all + +- name: supervisorctl stop all systemd + stat: + path: /etc/systemd/system/supervisor.service + register: stat_out + changed_when: stat_out is defined and stat_out.stat.exists + notify: + - supervisorctl_stop_all + +- name: Get supervisorctl output + shell: "/edx/bin/supervisorctl status" + register: supervisorctl_command_result + ignore_errors: True + +- name: Stop supervisor + stat: + path: /etc/init/supervisor.conf + register: stat_out + changed_when: stat_out is defined and stat_out.stat.exists and 'RUNNING' not in supervisorctl_command_result.stdout + notify: + - stop supervisor + +- name: Stop supervisor systemd + stat: + path: /etc/systemd/system/supervisor.service + register: stat_out + changed_when: stat_out is defined and stat_out.stat.exists and 'RUNNING' not in supervisorctl_command_result.stdout notify: - stop supervisor @@ -62,13 +102,5 @@ notify: - stop mongodb -- name: Stop supervisor systemd - stat: - path: /etc/systemd/system/supervisor.service - register: 
stat_out - changed_when: stat_out is defined and stat_out.stat.exists - notify: - - stop supervisor - - shell: "true" notify: kill processes by user diff --git a/playbooks/roles/supervisor/defaults/main.yml b/playbooks/roles/supervisor/defaults/main.yml index a27636a3191..af972c35f26 100644 --- a/playbooks/roles/supervisor/defaults/main.yml +++ b/playbooks/roles/supervisor/defaults/main.yml @@ -13,11 +13,6 @@ --- SUPERVISOR_HTTP_BIND_IP: '127.0.0.1' -# Used by the pre-supervisor script if you want to -# notify a hipchat room with the output. -SUPERVISOR_HIPCHAT_API_KEY: !!null -SUPERVISOR_HIPCHAT_ROOM: default - # do not override the bind_port since # all supervisors will then try to listen # on the same one @@ -41,42 +36,10 @@ supervisor_cfg: "{{ supervisor_app_dir }}/supervisord.conf" supervisor_service: supervisor supervisor_service_user: "{{ common_web_user }}" -supervisor_version: 3.2.3 +SUPERVISOR_VERSION: 3.2.3 +supervisor_version: "{{ SUPERVISOR_VERSION }}" supervisor_pip_pkgs: - - "boto=={{ common_boto_version }}" - - "python-simple-hipchat" + - boto=={{common_boto_version}} -supervisor_spec: - - service: edxapp - python: python.edxapp - code: "{{ edxapp_code_dir | default(None) }}" - env: "{{ edxapp_app_dir | default(None) }}/edxapp_env" - - service: xqueue - python: python.xqueue - code: "{{ xqueue_code_dir | default(None) }}" - env: "{{ xqueue_app_dir | default(none) }}/xqueue_env" - - service: ecommerce - python: python.ecommerce - code: "{{ ecommerce_code_dir | default(None) }}" - env: "{{ ecommerce_home | default(None) }}/ecommerce_env" - - service: insights - python: python.insights - code: "{{ insights_code_dir | default(None) }}" - env: "{{ insights_home | default(None) }}/insights_env" - - service: analytics_api - python: python.analytics_api - code: "{{ analytics_api_code_dir | default(None) }}" - env: "{{ analytics_api_home | default(None) }}/analytics_api_env" - - service: credentials - python: python.credentials - code: "{{ 
credentials_code_dir | default(None) }}" - env: "{{ credentials_home | default(None) }}/credentials_env" - - service: discovery - python: python.discovery - code: "{{ discovery_code_dir | default(None) }}" - env: "{{ discovery_home | default(None) }}/discovery_env" - - service: journals - python: python.journals - code: "{{ journals_code_dir | default(None) }}" - env: "{{ journals_home | default(None) }}/journals_env" +supervisor_spec: [] diff --git a/playbooks/roles/supervisor/files/pre_supervisor_checks.py b/playbooks/roles/supervisor/files/pre_supervisor_checks.py index ada2fa2bf50..428fd058c34 100755 --- a/playbooks/roles/supervisor/files/pre_supervisor_checks.py +++ b/playbooks/roles/supervisor/files/pre_supervisor_checks.py @@ -1,8 +1,9 @@ +from __future__ import absolute_import +from __future__ import print_function import argparse import boto.ec2 from boto.utils import get_instance_metadata, get_instance_identity from boto.exception import AWSConnectionError -import hipchat import os import subprocess import traceback @@ -10,22 +11,16 @@ import time # Services that should be checked for migrations. -MIGRATION_COMMANDS = { - 'lms': "/edx/bin/edxapp-migrate-lms --noinput --list", - 'cms': "/edx/bin/edxapp-migrate-cms --noinput --list", - 'xqueue': ". {env_file}; sudo -E -u xqueue {python} {code_dir}/manage.py showmigrations", - 'ecommerce': ". {env_file}; sudo -E -u ecommerce {python} {code_dir}/manage.py showmigrations", - 'insights': ". {env_file}; sudo -E -u insights {python} {code_dir}/manage.py showmigrations", - 'analytics_api': ". {env_file}; sudo -E -u analytics_api {python} {code_dir}/manage.py showmigrations", - 'credentials': ". {env_file}; sudo -E -u credentials {python} {code_dir}/manage.py showmigrations", - 'discovery': ". {env_file}; sudo -E -u discovery {python} {code_dir}/manage.py showmigrations", - 'journals': ". {env_file}; sudo -E -u journals {python} {code_dir}/manage.py showmigrations", +GENERIC_MIGRATION_COMMAND = ". 
{env_file}; sudo -E -u {user} {python} {code_dir}/manage.py showmigrations" +EDXAPP_MIGRATION_COMMANDS = { + 'lms': "/edx/bin/edxapp-migrate-lms --noinput --list", + 'cms': "/edx/bin/edxapp-migrate-cms --noinput --list", + 'workers': "/edx/bin/edxapp-migrate-cms --noinput --list; /edx/bin/edxapp-migrate-lms --noinput --list", } NGINX_ENABLE = { - 'lms': "sudo ln -sf /edx/app/nginx/sites-available/lms /etc/nginx/sites-enabled/lms", - 'cms': "sudo ln -sf /edx/app/nginx/sites-available/cms /etc/nginx/sites-enabled/cms", + 'lms': "sudo ln -sf /edx/app/nginx/sites-available/lms /etc/nginx/sites-enabled/lms", + 'cms': "sudo ln -sf /edx/app/nginx/sites-available/cms /etc/nginx/sites-enabled/cms", } -HIPCHAT_USER = "PreSupervisor" # Max amount of time to wait for tags to be applied. MAX_BACKOFF = 120 @@ -75,100 +70,23 @@ def edp_for_instance(instance_id): parser.add_argument("-e","--enabled", help="The location of the enabled services.") - migration_args = parser.add_argument_group("edxapp_migrations", - "Args for running edxapp migration checks.") - migration_args.add_argument("--edxapp-code-dir", - help="Location of the edx-platform code.") - migration_args.add_argument("--edxapp-python", - help="Path to python to use for executing migration check.") - migration_args.add_argument("--edxapp-env", - help="Location of the edxapp environment file.") - - xq_migration_args = parser.add_argument_group("xqueue_migrations", - "Args for running xqueue migration checks.") - xq_migration_args.add_argument("--xqueue-code-dir", - help="Location of the xqueue code.") - xq_migration_args.add_argument("--xqueue-python", - help="Path to python to use for executing migration check.") - migration_args.add_argument("--xqueue-env", - help="Location of the xqueue environment file.") - - ecom_migration_args = parser.add_argument_group("ecommerce_migrations", - "Args for running ecommerce migration checks.") - ecom_migration_args.add_argument("--ecommerce-python", - help="Path to python to use 
for executing migration check.") - ecom_migration_args.add_argument("--ecommerce-env", - help="Location of the ecommerce environment file.") - ecom_migration_args.add_argument("--ecommerce-code-dir", - help="Location of the ecommerce code.") - - credentials_migration_args = parser.add_argument_group("credentials_migrations", - "Args for running credentials migration checks.") - credentials_migration_args.add_argument("--credentials-python", - help="Path to python to use for executing migration check.") - credentials_migration_args.add_argument("--credentials-env", - help="Location of the credentials environment file.") - credentials_migration_args.add_argument("--credentials-code-dir", - help="Location of the credentials code.") - - discovery_migration_args = parser.add_argument_group("discovery_migrations", - "Args for running discovery migration checks.") - discovery_migration_args.add_argument("--discovery-python", + app_migration_args = parser.add_argument_group("app_migrations", + "Args for running app migration checks.") + app_migration_args.add_argument("--check-migrations", action='store_true', + help="Enable checking migrations.") + app_migration_args.add_argument("--check-migrations-service-names", + help="Comma separated list of service names that should be checked for migrations") + app_migration_args.add_argument("--app-python", help="Path to python to use for executing migration check.") - discovery_migration_args.add_argument("--discovery-env", - help="Location of the discovery environment file.") - discovery_migration_args.add_argument("--discovery-code-dir", - help="Location of the discovery code.") - - journals_migration_args = parser.add_argument_group("journals_migrations", - "Args for running journals migration checks.") - journals_migration_args.add_argument("--journals-python", - help="Path to python to use for executing migration check.") - journals_migration_args.add_argument("--journals-env", - help="Location of the journals environment
file.") - journals_migration_args.add_argument("--journals-code-dir", - help="Location of the journals code.") - - - insights_migration_args = parser.add_argument_group("insights_migrations", - "Args for running insights migration checks.") - insights_migration_args.add_argument("--insights-python", - help="Path to python to use for executing migration check.") - insights_migration_args.add_argument("--insights-env", - help="Location of the insights environment file.") - insights_migration_args.add_argument("--insights-code-dir", - help="Location of the insights code.") - - analyticsapi_migration_args = parser.add_argument_group("analytics_api_migrations", - "Args for running analytics_api migration checks.") - analyticsapi_migration_args.add_argument("--analytics-api-python", - help="Path to python to use for executing migration check.") - analyticsapi_migration_args.add_argument("--analytics-api-env", - help="Location of the analytics_api environment file.") - analyticsapi_migration_args.add_argument("--analytics-api-code-dir", - help="Location of the analytics_api code.") - - hipchat_args = parser.add_argument_group("hipchat", - "Args for hipchat notification.") - hipchat_args.add_argument("-c","--hipchat-api-key", - help="Hipchat token if you want to receive notifications via hipchat.") - hipchat_args.add_argument("-r","--hipchat-room", - help="Room to send messages to.") + app_migration_args.add_argument("--app-env", + help="Location of the app environment file.") + app_migration_args.add_argument("--app-code-dir", + help="Location of the app code.") args = parser.parse_args() report = [] prefix = None - notify = None - - try: - if args.hipchat_api_key: - hc = hipchat.HipChat(token=args.hipchat_api_key) - notify = lambda message: hc.message_room(room_id=args.hipchat_room, - message_from=HIPCHAT_USER, message=message) - except Exception as e: - print("Failed to initialize hipchat, {}".format(e)) - traceback.print_exc() instance_id = 
get_instance_metadata()['instance-id'] prefix = instance_id @@ -197,7 +115,7 @@ def edp_for_instance(instance_id): instance_id=instance_id) break except Exception as e: - print("Failed to get EDP for {}: {}".format(instance_id, str(e))) + print(("Failed to get EDP for {}: {}".format(instance_id, str(e)))) # With the time limit being 2 minutes we will # try 5 times before giving up. time.sleep(backoff) @@ -207,13 +125,13 @@ def edp_for_instance(instance_id): if environment is None or deployment is None or play is None: msg = "Unable to retrieve environment, deployment, or play tag." print(msg) - if notify: - notify("{} : {}".format(prefix, msg)) - exit(0) + exit(1) #get the hostname of the sandbox hostname = socket.gethostname() + ami_id = get_instance_metadata()['ami-id'] + try: #get the list of the volumes, that are attached to the instance volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id}) @@ -224,12 +142,11 @@ def edp_for_instance(instance_id): "deployment": deployment, "cluster": play, "instance-id": instance_id, + "ami-id": ami_id, "created": volume.create_time }) except Exception as e: msg = "Failed to tag volumes associated with {}: {}".format(instance_id, str(e)) print(msg) - if notify: - notify(msg) try: for service in services_for_instance(instance_id): @@ -239,30 +156,38 @@ def edp_for_instance(instance_id): # We have to reload the new config files subprocess.call("/bin/systemctl reload nginx", shell=True) - if service in MIGRATION_COMMANDS: - services = { - "lms": {'python': args.edxapp_python, 'env_file': args.edxapp_env, 'code_dir': args.edxapp_code_dir}, - "cms": {'python': args.edxapp_python, 'env_file': args.edxapp_env, 'code_dir': args.edxapp_code_dir}, - "ecommerce": {'python': args.ecommerce_python, 'env_file': args.ecommerce_env, 'code_dir': args.ecommerce_code_dir}, - "credentials": {'python': args.credentials_python, 'env_file': args.credentials_env, 'code_dir': args.credentials_code_dir}, - "discovery": {'python': 
args.discovery_python, 'env_file': args.discovery_env, 'code_dir': args.discovery_code_dir}, - "journals": {'python': args.journals_python, 'env_file': args.journals_env, 'code_dir': args.journals_code_dir}, - "insights": {'python': args.insights_python, 'env_file': args.insights_env, 'code_dir': args.insights_code_dir}, - "analytics_api": {'python': args.analytics_api_python, 'env_file': args.analytics_api_env, 'code_dir': args.analytics_api_code_dir}, - "xqueue": {'python': args.xqueue_python, 'env_file': args.xqueue_env, 'code_dir': args.xqueue_code_dir}, - } - - if service in services and all(arg!=None for arg in services[service].values()) and service in MIGRATION_COMMANDS: - serv_vars = services[service] - - cmd = MIGRATION_COMMANDS[service].format(**serv_vars) - if os.path.exists(serv_vars['code_dir']): - os.chdir(serv_vars['code_dir']) - # Run migration check command. - output = subprocess.check_output(cmd, shell=True, ) - if '[ ]' in output: - raise Exception("Migrations have not been run for {}".format(service)) - + if (args.check_migrations and + args.app_python != None and + args.app_env != None and + args.app_code_dir != None and + args.check_migrations_service_names != None and + service in args.check_migrations_service_names.split(',')): + + user = play + # Legacy naming workaround + # Using the play works everywhere but here. + if user == "analyticsapi": + user="analytics_api" + + cmd_vars = { + 'python': args.app_python, + 'env_file': args.app_env, + 'code_dir': args.app_code_dir, + 'service': service, + 'user': user, + } + cmd = GENERIC_MIGRATION_COMMAND.format(**cmd_vars) + if service in EDXAPP_MIGRATION_COMMANDS: + cmd = EDXAPP_MIGRATION_COMMANDS[service] + + if os.path.exists(cmd_vars['code_dir']): + os.chdir(cmd_vars['code_dir']) + # Run migration check command. 
+ output = subprocess.check_output(cmd, shell=True, ) + if '[ ]' in output: + raise Exception("Migrations have not been run for {}".format(service)) + else: + report.append("Checked migrations: {}".format(service)) # Link to available service. available_file = os.path.join(args.available, "{}.conf".format(service)) @@ -275,19 +200,12 @@ def edp_for_instance(instance_id): except AWSConnectionError as ae: msg = "{}: ERROR : {}".format(prefix, ae) - if notify: - notify(msg) - notify(traceback.format_exc()) raise ae except Exception as e: msg = "{}: ERROR : {}".format(prefix, e) print(msg) - if notify: - notify(msg) traceback.print_exc() raise e else: msg = "{}: {}".format(prefix, " | ".join(report)) print(msg) - if notify: - notify(msg) diff --git a/playbooks/roles/supervisor/tasks/main.yml b/playbooks/roles/supervisor/tasks/main.yml index ee09d2b72dd..7a000e449ef 100644 --- a/playbooks/roles/supervisor/tasks/main.yml +++ b/playbooks/roles/supervisor/tasks/main.yml @@ -105,12 +105,11 @@ - name: Install supervisor in its venv pip: - name: "{{ item }}" + name: "{{ supervisor_pip_pkgs }}" virtualenv: "{{ supervisor_venv_dir }}" state: present extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }}" become_user: "{{ supervisor_user }}" - with_items: "{{ supervisor_pip_pkgs }}" tags: - install - install:base @@ -134,7 +133,7 @@ dest: "/etc/systemd/system/{{ supervisor_service }}.service" owner: root group: root - when: ansible_distribution_release == 'xenial' + when: ansible_distribution_release == 'xenial' or ansible_distribution_release == 'bionic' tags: - install - install:base @@ -212,7 +211,7 @@ service: name: "{{ supervisor_service }}.service" enabled: yes - when: ansible_distribution_release == 'xenial' and docker_container.stdout != 'yes' + when: (ansible_distribution_release == 'xenial' or ansible_distribution_release == 'bionic') and docker_container.stdout != 'yes' tags: - install - install:base diff --git 
a/playbooks/roles/supervisor/templates/etc/init/supervisor-systemd.service.j2 b/playbooks/roles/supervisor/templates/etc/init/supervisor-systemd.service.j2 index 86a53911f3e..358bfd6b8b6 100644 --- a/playbooks/roles/supervisor/templates/etc/init/supervisor-systemd.service.j2 +++ b/playbooks/roles/supervisor/templates/etc/init/supervisor-systemd.service.j2 @@ -8,14 +8,10 @@ After=network.target {% if disable_edx_services and not devstack -%} # Run pre_supervisor ExecStartPre={{ supervisor_venv_dir }}/bin/python {{ supervisor_app_dir }}/pre_supervisor_checks.py \ - {% if SUPERVISOR_HIPCHAT_API_KEY is defined -%} - --hipchat-api-key {{ SUPERVISOR_HIPCHAT_API_KEY }} --hipchat-room {{ SUPERVISOR_HIPCHAT_ROOM }} \ - {% endif -%} - {%- for item in supervisor_spec -%} + {% for item in supervisor_spec -%} {%- if item.code -%} - {%- set name = item.service.replace('_', '-') -%} - --{{ name }}-python {{ COMMON_BIN_DIR }}/{{ item.python }} --{{ name }}-code-dir {{ item.code }} - {%- if item.env is defined %} --{{ name }}-env {{ item.env }}{% endif %} \ + --check-migrations --check-migrations-service-names {{ item.migration_check_services }} --app-python {{ COMMON_BIN_DIR }}/{{ item.python }} --app-code-dir {{ item.code }} + {%- if item.env is defined %} --app-env {{ item.env }}{% endif %} \ {% endif %} {%- endfor -%} --available={{ supervisor_available_dir }} --enabled={{ supervisor_cfg_dir }} diff --git a/playbooks/roles/test_build_server/defaults/main.yml b/playbooks/roles/test_build_server/defaults/main.yml index cf40b119633..e649e6f8519 100644 --- a/playbooks/roles/test_build_server/defaults/main.yml +++ b/playbooks/roles/test_build_server/defaults/main.yml @@ -16,4 +16,5 @@ # test_build_server_user: jenkins test_build_server_repo_path: /home/jenkins -test_edx_platform_version: master +TEST_EDX_PLATFORM_VERSION: master +test_edx_platform_version: "{{ TEST_EDX_PLATFORM_VERSION }}" diff --git a/playbooks/roles/test_build_server/files/test-development-environment.sh 
b/playbooks/roles/test_build_server/files/test-development-environment.sh index ab7b1ceeefb..2a4cc1d48b7 100755 --- a/playbooks/roles/test_build_server/files/test-development-environment.sh +++ b/playbooks/roles/test_build_server/files/test-development-environment.sh @@ -18,6 +18,7 @@ cd edx-platform-clone # This will run all of the setup it usually runs, but none of the # tests because TEST_SUITE isn't defined. +export PYTHON_VERSION=3.5 source scripts/jenkins-common.sh case "$1" in @@ -28,6 +29,9 @@ case "$1" in paver test_lib -t common/lib/xmodule/xmodule/tests/test_stringify.py # Generate some coverage reports + # Since `TEST_SUITE` is not set, change the coverage file written by the + # previous test to a generic one. + cp reports/common_lib_xmodule.coverage reports/.coverage paver coverage # Run some of the djangoapp unit tests @@ -46,13 +50,6 @@ case "$1" in # Run some of the bok-choy tests paver test_bokchoy -t discussion/test_discussion.py::DiscussionTabSingleThreadTest paver test_bokchoy -t studio/test_studio_outline.py::WarningMessagesTest::test_unreleased_published_locked --fasttest - paver test_bokchoy -t lms/test_lms_matlab_problem.py::MatlabProblemTest --fasttest - ;; - - "lettuce") - # Run some of the lettuce acceptance tests - paver test_acceptance -s lms --extra_args="lms/djangoapps/courseware/features/problems.feature -s 1" - paver test_acceptance -s cms --extra_args="cms/djangoapps/contentstore/features/html-editor.feature -s 1" --fasttest ;; "quality") diff --git a/playbooks/roles/test_build_server/tasks/main.yml b/playbooks/roles/test_build_server/tasks/main.yml index 2bae556251e..497d79210e3 100644 --- a/playbooks/roles/test_build_server/tasks/main.yml +++ b/playbooks/roles/test_build_server/tasks/main.yml @@ -40,9 +40,10 @@ shell: "bash test-development-environment.sh {{ item }}" args: chdir: "{{ test_build_server_repo_path }}/" + environment: + PYTHON_VERSION: "3.5" become_user: "{{ test_build_server_user }}" with_items: - "unit" - "js" - 
"bokchoy" - - "lettuce" diff --git a/playbooks/roles/testcourses/tasks/deploy.yml b/playbooks/roles/testcourses/tasks/deploy.yml index 01cb4887f1b..76e8753025e 100644 --- a/playbooks/roles/testcourses/tasks/deploy.yml +++ b/playbooks/roles/testcourses/tasks/deploy.yml @@ -6,7 +6,7 @@ loop_var: course - name: enroll test users in the testcourses - shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings={{ demo_edxapp_settings }} --service-variant lms enroll_user_in_course -e {{ item[0].email }} -c {{ item[1].course_id }}" + shell: ". {{ demo_edxapp_env }} && {{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings={{ demo_edxapp_settings }} --service-variant lms enroll_user_in_course -e {{ item[0].email }} -c {{ item[1].course_id }}" args: chdir: "{{ demo_edxapp_code_dir }}" become_user: "{{ common_web_user }}" diff --git a/playbooks/roles/testcourses/tasks/import_course.yml b/playbooks/roles/testcourses/tasks/import_course.yml index ac364c380bc..bb9be1f6e9d 100644 --- a/playbooks/roles/testcourses/tasks/import_course.yml +++ b/playbooks/roles/testcourses/tasks/import_course.yml @@ -7,7 +7,7 @@ register: testcourse_checkout - name: import testcourse - shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings={{ demo_edxapp_settings }} import {{ demo_app_dir }} {{ course.course_id }}" + shell: ". 
{{ demo_edxapp_env }} && {{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings={{ demo_edxapp_settings }} import {{ demo_app_dir }} {{ course.course_id }}" args: chdir: "{{ demo_edxapp_code_dir }}" when: testcourse_checkout.changed diff --git a/playbooks/roles/tools_jenkins/defaults/main.yml b/playbooks/roles/tools_jenkins/defaults/main.yml index 7cdcddb3056..fe2a587be4b 100644 --- a/playbooks/roles/tools_jenkins/defaults/main.yml +++ b/playbooks/roles/tools_jenkins/defaults/main.yml @@ -1,6 +1,7 @@ --- -jenkins_tools_version: "1.651.3" +JENKINS_TOOLS_VERSION: "1.651.3" +jenkins_tools_version: "{{ JENKINS_TOOLS_VERSION }}" jenkins_tools_plugins: - { name: "cloudbees-folder", version: "5.12" } @@ -19,7 +20,6 @@ jenkins_tools_plugins: - { name: "conditional-buildstep", version: "1.3.5" } - { name: "run-condition", version: "0.10" } - { name: "multiple-scms", version: "0.6" } - - { name: "hipchat", version: "1.0.0" } - { name: "throttle-concurrents", version: "1.9.0" } - { name: "mask-passwords", version: "2.8" } - { name: "dashboard-view", version: "2.9.10" } diff --git a/playbooks/roles/tools_jenkins/tasks/main.yml b/playbooks/roles/tools_jenkins/tasks/main.yml index 1a8d12af5ca..6d743aefe14 100644 --- a/playbooks/roles/tools_jenkins/tasks/main.yml +++ b/playbooks/roles/tools_jenkins/tasks/main.yml @@ -9,12 +9,14 @@ - install - install:system-requirements -- name: install python3.5 +- name: install python3.5 and 3.6 apt: name: "{{ item }}" with_items: - python3.5 - python3.5-dev + - python3.6 + - python3.6-dev when: ansible_distribution_release == 'trusty' tags: - install diff --git a/playbooks/roles/user/defaults/main.yml b/playbooks/roles/user/defaults/main.yml index 41a009781ba..6371dc0d73a 100644 --- a/playbooks/roles/user/defaults/main.yml +++ b/playbooks/roles/user/defaults/main.yml @@ -35,10 +35,6 @@ user_rbash_links: # will take precedence over the paramter user_info: [] -user_debian_pkgs: - # This is needed for the uri module to work correctly. 
- - python-httplib2 - # Boolean variable that will cause the user module to stop Ansible with a # failure if a user that has been configured to have their keys pulled from # GitHub does not have any ssh keys configured on GitHub. This is set to diff --git a/playbooks/roles/user/tasks/main.yml b/playbooks/roles/user/tasks/main.yml index d968df5ab4e..ae6587e229a 100644 --- a/playbooks/roles/user/tasks/main.yml +++ b/playbooks/roles/user/tasks/main.yml @@ -70,15 +70,6 @@ # want to provide more binaries add them to user_rbash_links # which can be passed in as a parameter to the role. # -- name: Install debian packages user role needs - apt: - name: "{{ item }}" - install_recommends: yes - state: present - update_cache: yes - with_items: "{{ user_debian_pkgs }}" - when: ansible_distribution in common_debian_variants - - debug: var: user_info @@ -129,7 +120,7 @@ when: item.get('state', 'present') == 'present' and item.github is defined with_items: "{{ user_info }}" register: github_users_return - until: github_users_return|succeeded + until: github_users_return is succeeded retries: 5 - name: Print warning if github user(s) missing ssh key @@ -153,10 +144,30 @@ key: "https://github.com/{{ item.name }}.keys" when: item.github is defined and item.get('state', 'present') == 'present' register: task_result - until: task_result|succeeded + until: task_result is succeeded retries: 5 with_items: "{{ user_info }}" +- name: Create ~/.ssh directory for non github users + file: + path: "/home/{{ item.name }}/.ssh" + state: directory + mode: "0755" + owner: "{{ item.name }}" + group: "{{ item.name }}" + when: item.authorized_keys is defined and item.get('state', 'present') == 'present' + with_items: "{{ user_info }}" + +- name: Build authorized_keys file for non github users + template: + src: "templates/authorized_keys.j2" + dest: "/home/{{ item.name }}/.ssh/authorized_keys" + mode: "0600" + owner: "{{ item.name }}" + group: "{{ item.name }}" + with_items: "{{ user_info }}" + when: 
item.authorized_keys is defined and item.get('state', 'present') == 'present' + - name: Create bashrc file for normal users template: src: default.bashrc.j2 @@ -234,3 +245,29 @@ with_nested: - "{{ user_info }}" - "{{ user_rbash_links }}" + +- name: Allow users to ssh + lineinfile: + dest: /etc/ssh/sshd_config + regexp: "#users_allowed_to_ssh {{ vars['user_info'] }}" + line: "AllowUsers {{ (( user_info | map(attribute='name') | list ) + [ ansible_ssh_user ]) | join(' ') }} #users_allowed_to_ssh {{ vars['user_info'] }}" + when: user_info|length > 0 + register: users_ssh_access + +- name: Restart ssh (ubuntu/debian) + service: + name: ssh + state: restarted + become: True + when: > + users_ssh_access.changed and + ansible_distribution in common_debian_variants + +- name: Restart ssh (redhat) + service: + name: sshd + state: restarted + become: True + when: > + users_ssh_access.changed and + ansible_distribution in common_redhat_variants diff --git a/playbooks/roles/user/templates/authorized_keys.j2 b/playbooks/roles/user/templates/authorized_keys.j2 new file mode 100644 index 00000000000..67dd4df13c1 --- /dev/null +++ b/playbooks/roles/user/templates/authorized_keys.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +{% for line in item.authorized_keys -%} +{{ line }} +{% endfor %} diff --git a/playbooks/roles/user_retirement_pipeline/README.rst b/playbooks/roles/user_retirement_pipeline/README.rst new file mode 100644 index 00000000000..37e235e2d1a --- /dev/null +++ b/playbooks/roles/user_retirement_pipeline/README.rst @@ -0,0 +1,143 @@ + +User Retirement Pipeline. +############ + +In the Open edX platform, the user experience is enabled by several services, +such as LMS, Studio, ecommerce, credentials, discovery, and more. +Personally Identifiable Identification (PII) about a user can exist in many of +these services. 
As a consequence, to remove a user’s PII, you must be able to +request each service containing PII to remove, delete, or unlink the data for +that user in that service. + +In the user retirement feature, a centralized process (the driver scripts) +orchestrates all of these requests. For information about how to configure the +driver scripts, see Setting Up the User Retirement Driver Scripts. + +[More info here.](https://edx.readthedocs.io/projects/edx-installing-configuring-and-running/en/latest/configuration/user_retire/implementation_overview.html) + +Configuration & Deployment +************************* +The user retirement pipeline can be deployed together with the edxapp role, on +small deployments that use a single AppServer to host all services, or +standalone, which is the default for bigger installs. + +You can also use ansible-playbook to test this role independently. +It requires you to pass more variables manually because they're not available +except when running inside "edxapp" role. + +When running this role, you'll need to set: + +* `COMMON_RETIREMENT_SERVICE_SETUP`: Set to true to configure the retirement service pipeline +* `RETIREMENT_SERVICE_COOL_OFF_DAYS`: Number of days that an account stays marked for deletion before being picked up be the retirement service +* `RETIREMENT_SERVICE_ENABLE_CRON_JOB`: Set to true if you want to set up a daily cron job for the retirement service +* `EDXAPP_RETIREMENT_SERVICE_USER_EMAIL`: Email of the retirement worker user set up on LMS +* `EDXAPP_RETIREMENT_SERVICE_USER_NAME`: Username of the retirement worker user set up on LMS +* `RETIREMENT_SERVICE_EDX_OAUTH2_KEY`: OAuth2 client id from LMS +* `RETIREMENT_SERVICE_EDX_OAUTH2_SECRET`: OAuth2 client secret from LMS +* `RETIREMENT_LMS_BASE_URL`: Full LMS url (e.g. `https://lms.domain.com`) +* `RETIREMENT_ECOMMERCE_BASE_BASE_URL`: Full LMS url (e.g.`https://lms.domain.com`) +* `RETIREMENT_CREDENTIALS_BASE_URL`: Full LMS url (e.g. 
`https://lms.domain.com`) + +To use a custom retirement pipeline, you'll need to configure the git remotes +and also the retirement pipeline "steps". + +To set up the git repository, you can follow this template: + +``` +RETIREMENT_SERVICE_GIT_IDENTITY: !!null +RETIREMENT_SERVICE_GIT_REPOS: + - PROTOCOL: "https" + DOMAIN: "github.com" + PATH: "edx" + REPO: "tubular.git" + VERSION: "master" + DESTINATION: "{{ retirement_service_app_dir }}" + SSH_KEY: "{{ RETIREMENT_SERVICE_GIT_IDENTITY }}" +``` + +And to set up the retirement pipeline, you'll need to set +`RETIREMENT_SERVICE_PIPELINE_CONFIGURATION` according to the following example: + +``` +RETIREMENT_SERVICE_PIPELINE_CONFIGURATION: + - NAME: "RETIRING_ENROLLMENTS" + NAME_COMPLETE: "ENROLLMENTS_COMPLETE" + SERVICE: "LMS" + FUNCTION: "retirement_unenroll" + - NAME: "RETIRING_LMS_MISC" + NAME_COMPLETE: "LMS_MISC_COMPLETE" + SERVICE: "LMS" + FUNCTION: "retirement_lms_retire_misc" + - NAME: "RETIRING_LMS" + NAME_COMPLETE: "LMS_COMPLETE" + SERVICE: "LMS" + FUNCTION: "retirement_lms_retire" +``` + +You can also test this role on your Docker devstack, like this: +1. Clone this branch to `./src` folder of your `master` devstack. +2. From the `devstack` folder, run `make lms-shell` and edit `lms.env.json` to set these variables: +``` +.... +"RETIRED_USER_SALTS": ["oWiJVxbtp86kEV4jAHcZXSoSucSSF6GE6qjFA8rZp8yBPMSwKM",], +"EDXAPP_RETIREMENT_SERVICE_USER_NAME": "retirement_service_worker", +"RETIREMENT_STATES": [ + "PENDING", + "RETIRING_ENROLLMENTS", + "ENROLLMENTS_COMPLETE", + "RETIRING_LMS_MISC", + "LMS_MISC_COMPLETE", + "RETIRING_LMS", + "LMS_COMPLETE", + "RETIRING_CREDENTIALS", + "CREDENTIALS_COMPLETE", + "ERRORED", + "ABORTED", + "COMPLETE" +], +... +"FEATURES": { + ... + "ENABLE_ACCOUNT_DELETION": true +} +``` +3. Populate the retirement states: +``` + ./manage.py lms --settings=devstack_docker populate_retirement_states +``` +3. 
Create the user and OAuth2 Credentials for the retirement worker: +``` +app_name=retirement +user_name=retirement_service_worker +./manage.py lms --settings= manage_user $user_name $user_name@example.com --staff --superuser +./manage.py lms --settings= create_dot_application $app_name $user_name +``` +Take a note of the generated client id and secret, you'll need it to set up the retirement scripts. +4. Now, use the Ansible Role to set up the User Retirement Pipeline: +``` +export PYTHONUNBUFFERED=1 +source /edx/app/edx_ansible/venvs/edx_ansible/bin/activate +cd /edx/src/configuration/playbooks +ansible-playbook \ + -i localhost, \ + -c local run_role.yml \ + -e role=user_retirement_pipeline \ + -e configuration_version=master \ + -e edx_platform_version=master \ + -e edxapp_user=root \ + -e COMMON_RETIREMENT_SERVICE_SETUP=true \ + -e RETIREMENT_SERVICE_COOL_OFF_DAYS=0 \ + -e RETIREMENT_SERVICE_ENABLE_CRON_JOB=true \ + -e EDXAPP_RETIREMENT_SERVICE_USER_EMAIL=retirement_service_worker@example.com \ + -e EDXAPP_RETIREMENT_SERVICE_USER_NAME=retirement_service_worker \ + -e RETIREMENT_SERVICE_EDX_OAUTH2_KEY= \ + -e RETIREMENT_SERVICE_EDX_OAUTH2_SECRET= +``` +3. Check that the retirement pipeline is correctly set up at `/edx/app/retirement_service`. +4. Create some users and go the their account page and mark them for deletion. +![mar](https://user-images.githubusercontent.com/27893385/53957569-6b9da180-40bd-11e9-9139-10c62e499ec4.png) +5. Check [here](http://edx.devstack.lms:18000/admin/user_api/userretirementrequest/) if the retirement requests have been registered. +6. 
Run the retirement script as root: +``` +/edx/app/retirement_service/retire_users.sh +``` diff --git a/playbooks/roles/user_retirement_pipeline/defaults/main.yml b/playbooks/roles/user_retirement_pipeline/defaults/main.yml new file mode 100644 index 00000000000..73266838edf --- /dev/null +++ b/playbooks/roles/user_retirement_pipeline/defaults/main.yml @@ -0,0 +1,78 @@ +--- +# +# Open edX Retirement Pipeline Configuration +# +# github: https://github.com/edx/configuration +# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS +# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions +# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT +# +# +# Deploy the User Retirement Pipeline +# +# See documentation in README.rst + +# +# This file contains the variables you'll need to pass to the role, and some +# example values. + +retirement_service_name: "retirement-service" +retirement_service_user: "edx_retirement_worker" +retirement_service_home: "{{ COMMON_APP_DIR }}/retirement_service" +retirement_service_app_dir: "{{ retirement_service_home }}/tubular" +retirement_service_data_dir: "{{ COMMON_DATA_DIR }}/retirement_service" +retirement_service_log_dir: "{{ COMMON_LOG_DIR }}/retirement_service" +retirement_service_venv_dir: "{{ retirement_service_home }}/venv" +retirement_service_user_shell: "/bin/bash" +retirement_service_script_path: "{{ retirement_service_app_dir }}/scripts" + +retirement_service_pip_version: "19.0.3" + +retirement_service_environment: + PATH: '{{ retirement_service_venv_dir }}/bin:{{ ansible_env.PATH }}' + +# Set up git repos +RETIREMENT_SERVICE_GIT_IDENTITY: !!null +RETIREMENT_SERVICE_GIT_REPOS: + - PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}" + DOMAIN: "{{ COMMON_GIT_MIRROR }}" + PATH: "{{ COMMON_GIT_PATH }}" + REPO: "tubular.git" + VERSION: "master" + DESTINATION: "{{ retirement_service_app_dir }}" + SSH_KEY: "{{ RETIREMENT_SERVICE_GIT_IDENTITY }}" + +# Enable the setup and configuration of the 
retirement service +# The setup is handled by COMMON_RETIREMENT_SERVICE_SETUP on common vars +RETIREMENT_SERVICE_COOL_OFF_DAYS: 5 + +# Enable the configuration of a cron job to run this periodically +RETIREMENT_SERVICE_ENABLE_CRON_JOB: false +RETIREMENT_SERVICE_CRON_JOB_HOURS: 0 +RETIREMENT_SERVICE_CRON_JOB_MINUTES: 0 + +# Configuration needed for LMS and OAuth +# The retirement LMS username should be set on edxapp role through +# EDXAPP_RETIREMENT_SERVICE_USER_EMAIL and EDXAPP_RETIREMENT_SERVICE_USER_NAME +RETIREMENT_SERVICE_EDX_OAUTH2_KEY: "PLEASE-SET-THIS-RETIREMENT-CLIENT-ID" +RETIREMENT_SERVICE_EDX_OAUTH2_SECRET: "PLEASE-SET-THIS-RETIREMENT-CLIENT-SECRET" + +# LMS, Ecommerce and Credentials base URL +RETIREMENT_LMS_BASE_URL: 'http://edx.devstack.lms:18000' +RETIREMENT_ECOMMERCE_BASE_BASE_URL: 'http://edx.devstack.ecommerce:18000' +RETIREMENT_CREDENTIALS_BASE_URL: 'http://edx.devstack.credentials:18000' + +# Retirement Pipeline configuration +RETIREMENT_SERVICE_PIPELINE_CONFIGURATION: + - NAME: "RETIRING_ENROLLMENTS" + NAME_COMPLETE: "ENROLLMENTS_COMPLETE" + SERVICE: "LMS" + FUNCTION: "retirement_unenroll" + - NAME: "RETIRING_LMS_MISC" + NAME_COMPLETE: "LMS_MISC_COMPLETE" + SERVICE: "LMS" + FUNCTION: "retirement_lms_retire_misc" + - NAME: "RETIRING_LMS" + NAME_COMPLETE: "LMS_COMPLETE" + SERVICE: "LMS" + FUNCTION: "retirement_lms_retire" diff --git a/playbooks/roles/user_retirement_pipeline/meta/main.yml b/playbooks/roles/user_retirement_pipeline/meta/main.yml new file mode 100644 index 00000000000..7529cd514b8 --- /dev/null +++ b/playbooks/roles/user_retirement_pipeline/meta/main.yml @@ -0,0 +1,11 @@ +dependencies: + - common + - role: add_user + user_name: "{{ retirement_service_user }}" + user_home: "{{ retirement_service_home }}" + group_name: "{{ common_web_group }}" + - role: git_clone + repo_owner: "{{ retirement_service_user }}" + repo_group: "{{ retirement_service_user }}" + GIT_REPOS: "{{ RETIREMENT_SERVICE_GIT_REPOS }}" + git_home: "{{ 
retirement_service_home }}" diff --git a/playbooks/roles/user_retirement_pipeline/tasks/main.yml b/playbooks/roles/user_retirement_pipeline/tasks/main.yml new file mode 100644 index 00000000000..c532c2f3d0c --- /dev/null +++ b/playbooks/roles/user_retirement_pipeline/tasks/main.yml @@ -0,0 +1,79 @@ +- name: Create data and log directories + file: + path: "{{ item }}" + state: directory + owner: "{{ retirement_service_user }}" + group: "{{ common_web_group }}" + mode: 0755 + with_items: + - "{{ retirement_service_data_dir }}/learners_to_retire" + - "{{ retirement_service_log_dir }}" + tags: + - install + - install:configuration + +- name: Install python3 + apt: + name: "{{ item }}" + with_items: + - python3-pip + - python3-dev + tags: + - install + - install:requirements + +- name: Build virtualenv with python3 + command: "virtualenv --python=python3 {{ retirement_service_venv_dir }}" + args: + creates: "{{ retirement_service_venv_dir }}/bin/pip" + become_user: "{{ retirement_service_user }}" + tags: + - install + - install:requirements + +- name: Pin pip to a specific version. 
+ command: "{{ retirement_service_venv_dir }}/bin/pip install pip=={{ retirement_service_pip_version }}" + become_user: "{{ retirement_service_user }}" + tags: + - install + - install:requirements + +- name: Install python requirements + command: pip install -r requirements.txt + args: + chdir: "{{ retirement_service_app_dir }}" + become_user: "{{ retirement_service_user }}" + environment: "{{ retirement_service_environment }}" + tags: + - install + - install:requirements + +- name: Generate configuration file for retirement service + template: + src: "config.yml.j2" + dest: "{{ retirement_service_home }}/config.yml" + owner: "{{ retirement_service_user }}" + group: "{{ common_web_group }}" + tags: + - install + - install:configuration + +- name: Set up user retirement script + template: + src: "retire_users.sh.j2" + dest: "{{ retirement_service_home }}/retire_users.sh" + mode: 0750 + owner: "{{ retirement_service_user }}" + group: "{{ common_web_group }}" + tags: + - install + - install:configuration + +- name: Install cron job for automatically running User Retirement + cron: + name: "Run user retirement pipeline" + job: "{{retirement_service_home}}/retire_users.sh" + hour: "{{ RETIREMENT_SERVICE_CRON_JOB_HOURS }}" + minute: "{{ RETIREMENT_SERVICE_CRON_JOB_MINUTES }}" + day: "*" + when: RETIREMENT_SERVICE_ENABLE_CRON_JOB diff --git a/playbooks/roles/user_retirement_pipeline/templates/config.yml.j2 b/playbooks/roles/user_retirement_pipeline/templates/config.yml.j2 new file mode 100644 index 00000000000..31a6eedf18a --- /dev/null +++ b/playbooks/roles/user_retirement_pipeline/templates/config.yml.j2 @@ -0,0 +1,12 @@ +client_id: {{ RETIREMENT_SERVICE_EDX_OAUTH2_KEY }} +client_secret: {{ RETIREMENT_SERVICE_EDX_OAUTH2_SECRET }} + +base_urls: + lms: {{ RETIREMENT_LMS_BASE_URL }} + ecommerce: {{ RETIREMENT_ECOMMERCE_BASE_BASE_URL }} + credentials: {{ RETIREMENT_CREDENTIALS_BASE_URL }} + +retirement_pipeline: + {% for item in RETIREMENT_SERVICE_PIPELINE_CONFIGURATION %} 
+ - ['{{ item.NAME }}', '{{ item.NAME_COMPLETE }}', '{{ item.SERVICE }}', '{{ item.FUNCTION }}'] + {% endfor %} diff --git a/playbooks/roles/user_retirement_pipeline/templates/retire_users.sh.j2 b/playbooks/roles/user_retirement_pipeline/templates/retire_users.sh.j2 new file mode 100644 index 00000000000..d9441b20ef8 --- /dev/null +++ b/playbooks/roles/user_retirement_pipeline/templates/retire_users.sh.j2 @@ -0,0 +1,43 @@ +#!/bin/bash + +# Log to console and to file +LOG_LOCATION={{retirement_service_log_dir}} +TIMESTAMP=$(date -u) +exec > >(tee -i "$LOG_LOCATION/retirement-logs-[$TIMESTAMP].log") +exec 2>&1 +echo "Writing logs to: [ $LOG_LOCATION ]" + +# Ensure only one instance of the retirement script is running +LOCKFILE={{retirement_service_data_dir }}/retirement.lock +if [ -e ${LOCKFILE} ] && kill -0 `cat ${LOCKFILE}`; then + echo "Retirement Pipeline already running!" + exit +fi +# make sure the lockfile is removed when we exit and then claim it +trap "rm -f ${LOCKFILE}; exit" INT TERM EXIT +echo $$ > ${LOCKFILE} + +# Source virtualenv +source {{retirement_service_venv_dir}}/bin/activate + +# List users that are ready to be deleted, after the specified cool off period +{{retirement_service_script_path}}/get_learners_to_retire.py \ + --config_file={{retirement_service_home}}/config.yml \ + --output_dir={{retirement_service_data_dir }}/learners_to_retire \ + --cool_off_days={{RETIREMENT_SERVICE_COOL_OFF_DAYS}} + +# Loop over users and run deletion pipeline +unset $RETIREMENT_USERNAME +for filename in {{retirement_service_data_dir }}/learners_to_retire/*; do + [ -e "$filename" ] || continue + . $filename + echo "Deleting user: $RETIREMENT_USERNAME..." 
+ {{retirement_service_script_path}}/retire_one_learner.py \ + --config_file={{retirement_service_home}}/config.yml \ + --username=$RETIREMENT_USERNAME + unset $RETIREMENT_USERNAME + rm -f $filename +done + +# Clear the lockfile +rm -f $LOCKFILE diff --git a/playbooks/roles/veda_delivery_worker/meta/main.yml b/playbooks/roles/veda_delivery_worker/meta/main.yml index 3c3108352bc..56950d0f6b4 100644 --- a/playbooks/roles/veda_delivery_worker/meta/main.yml +++ b/playbooks/roles/veda_delivery_worker/meta/main.yml @@ -15,9 +15,9 @@ dependencies: - supervisor - role: video_pipeline_base video_pipeline_base_service_name: '{{ veda_delivery_worker_service_name }}' - - role: edx_service - edx_service_name: "{{ veda_delivery_worker_service_name }}" - edx_service_config: "{{ veda_delivery_worker_service_config }}" - edx_service_repos: "{{ VEDA_DELIVERY_WORKER_REPOS }}" - edx_service_user: "{{ veda_delivery_worker_user }}" - edx_service_home: "{{ veda_delivery_worker_home }}" + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ veda_delivery_worker_service_name }}" + edx_service_with_rendered_config_service_config: "{{ veda_delivery_worker_service_config }}" + edx_service_with_rendered_config_repos: "{{ VEDA_DELIVERY_WORKER_REPOS }}" + edx_service_with_rendered_config_user: "{{ veda_delivery_worker_user }}" + edx_service_with_rendered_config_home: "{{ veda_delivery_worker_home }}" diff --git a/playbooks/roles/veda_encode_worker/meta/main.yml b/playbooks/roles/veda_encode_worker/meta/main.yml index 4ad79f98ebd..74ecad7ab26 100644 --- a/playbooks/roles/veda_encode_worker/meta/main.yml +++ b/playbooks/roles/veda_encode_worker/meta/main.yml @@ -17,9 +17,9 @@ dependencies: - role: video_pipeline_base video_pipeline_base_service_name: '{{ veda_encode_worker_service_name }}' video_pipeline_base_config_extra: '{{ veda_encode_worker_config_extra }}' - - role: edx_service - edx_service_name: "{{ veda_encode_worker_service_name }}" - 
edx_service_config: "{{ veda_encode_worker_service_config }}" - edx_service_repos: "{{ VEDA_ENCODE_WORKER_REPOS }}" - edx_service_user: "{{ veda_encode_worker_user }}" - edx_service_home: "{{ veda_encode_worker_home }}" + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ veda_encode_worker_service_name }}" + edx_service_with_rendered_config_service_config: "{{ veda_encode_worker_service_config }}" + edx_service_with_rendered_config_repos: "{{ VEDA_ENCODE_WORKER_REPOS }}" + edx_service_with_rendered_config_user: "{{ veda_encode_worker_user }}" + edx_service_with_rendered_config_home: "{{ veda_encode_worker_home }}" diff --git a/playbooks/roles/veda_pipeline_worker/meta/main.yml b/playbooks/roles/veda_pipeline_worker/meta/main.yml index a2d08082e50..3fa6c13fa92 100644 --- a/playbooks/roles/veda_pipeline_worker/meta/main.yml +++ b/playbooks/roles/veda_pipeline_worker/meta/main.yml @@ -17,9 +17,9 @@ dependencies: - role: video_pipeline_base video_pipeline_base_service_name: '{{ veda_pipeline_worker_service_name }}' video_pipeline_base_config_extra: '{{ veda_pipeline_worker_config_extra }}' - - role: edx_service - edx_service_name: "{{ veda_pipeline_worker_service_name }}" - edx_service_config: "{{ veda_pipeline_worker_service_config }}" - edx_service_repos: "{{ VEDA_PIPELINE_WORKER_REPOS }}" - edx_service_user: "{{ veda_pipeline_worker_user }}" - edx_service_home: "{{ veda_pipeline_worker_home }}" + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ veda_pipeline_worker_service_name }}" + edx_service_with_rendered_config_service_config: "{{ veda_pipeline_worker_service_config }}" + edx_service_with_rendered_config_repos: "{{ VEDA_PIPELINE_WORKER_REPOS }}" + edx_service_with_rendered_config_user: "{{ veda_pipeline_worker_user }}" + edx_service_with_rendered_config_home: "{{ veda_pipeline_worker_home }}" diff --git a/playbooks/roles/veda_web_frontend/defaults/main.yml 
b/playbooks/roles/veda_web_frontend/defaults/main.yml index 487d1a4f459..b00890664d0 100644 --- a/playbooks/roles/veda_web_frontend/defaults/main.yml +++ b/playbooks/roles/veda_web_frontend/defaults/main.yml @@ -77,8 +77,7 @@ VEDA_WEB_FRONTEND_DEFAULT_DB_NAME: '{{ VIDEO_PIPELINE_BASE_DEFAULT_DB_NAME }}' VEDA_WEB_FRONTEND_MYSQL_HOST: '{{ VIDEO_PIPELINE_BASE_MYSQL_HOST }}' VEDA_WEB_FRONTEND_MYSQL_USER: '{{ VIDEO_PIPELINE_BASE_MYSQL_USER }}' VEDA_WEB_FRONTEND_MYSQL_PASSWORD: '{{ VIDEO_PIPELINE_BASE_MYSQL_PASSWORD }}' +VEDA_WEB_FRONTEND_MYSQLE_CONN_MAX_AGE: 60 VEDA_WEB_FRONTEND_OAUTH2_URL: '{{ VIDEO_PIPELINE_BASE_URL_ROOT }}/api/val/v0' VEDA_WEB_FRONTEND_LOGOUT_URL: '{{ VIDEO_PIPELINE_BASE_URL_ROOT }}/logout/' -VEDA_WEB_FRONTEND_SOCIAL_AUTH_EDX_OIDC_KEY: '{{ VIDEO_PIPELINE_BASE_SOCIAL_AUTH_EDX_OIDC_KEY | default("pipeline-key") }}' -VEDA_WEB_FRONTEND_SOCIAL_AUTH_EDX_OIDC_SECRET: '{{ VIDEO_PIPELINE_BASE_SOCIAL_AUTH_EDX_OIDC_SECRET | default("pipeline-secret") }}' diff --git a/playbooks/roles/veda_web_frontend/meta/main.yml b/playbooks/roles/veda_web_frontend/meta/main.yml index 7852d552c1a..cb2f17598c8 100644 --- a/playbooks/roles/veda_web_frontend/meta/main.yml +++ b/playbooks/roles/veda_web_frontend/meta/main.yml @@ -14,26 +14,27 @@ dependencies: - role: video_pipeline_base video_pipeline_base_service_name: '{{ veda_web_frontend_service_name }}' video_pipeline_base_config_extra: '{{ veda_web_frontend_config_extra }}' - - role: edx_django_service - edx_django_service_repo: '{{ VEDA_WEB_FRONTEND_REPO }}' - edx_django_service_version: '{{ VEDA_WEB_FRONTEND_VERSION }}' - edx_django_service_name: '{{ veda_web_frontend_service_name }}' - edx_django_service_user: '{{ veda_web_frontend_user }}' - edx_django_service_debian_pkgs_extra: '{{ veda_web_frontend_debian_pkgs }}' - edx_django_service_gunicorn_port: '{{ VEDA_WEB_FRONTEND_GUNICORN_PORT }}' - edx_django_service_django_settings_module: '{{ VEDA_WEB_FRONTEND_DJANGO_SETTINGS_MODULE }}' - 
edx_django_service_environment_extra: '{{ VEDA_WEB_FRONTEND_ENVIRONMENT }}' - edx_django_service_wsgi_name: 'VEDA' - edx_django_service_nginx_port: '{{ VEDA_WEB_FRONTEND_NGINX_PORT }}' - edx_django_service_ssl_nginx_port: '{{ VEDA_WEB_FRONTEND_SSL_NGINX_PORT }}' - edx_django_service_secret_key: '{{ VEDA_WEB_FRONTEND_SECRET_KEY }}' - edx_django_service_staticfiles_storage: '{{ VEDA_WEB_FRONTEND_STATICFILES_STORAGE }}' - edx_django_service_media_storage_backend: '{{ VEDA_WEB_FRONTEND_MEDIA_STORAGE_BACKEND }}' - edx_django_service_memcache: '{{ VEDA_WEB_FRONTEND_MEMCACHE }}' - edx_django_service_default_db_host: '{{ VEDA_WEB_FRONTEND_MYSQL_HOST }}' - edx_django_service_default_db_name: '{{ VEDA_WEB_FRONTEND_DEFAULT_DB_NAME }}' - edx_django_service_db_user: '{{ VEDA_WEB_FRONTEND_MYSQL_USER }}' - edx_django_service_db_password: '{{ VEDA_WEB_FRONTEND_MYSQL_PASSWORD }}' - edx_django_service_use_python3: false - edx_django_service_config: '{{ VEDA_WEB_FRONTEND_SERVICE_CONFIG }}' - edx_django_service_post_migrate_commands: '{{ veda_web_frontend_post_migrate_commands }}' + - role: edx_django_service_with_rendered_config + edx_django_service_with_rendered_config_repo: '{{ VEDA_WEB_FRONTEND_REPO }}' + edx_django_service_with_rendered_config_version: '{{ VEDA_WEB_FRONTEND_VERSION }}' + edx_django_service_with_rendered_config_service_name: '{{ veda_web_frontend_service_name }}' + edx_django_service_with_rendered_config_user: '{{ veda_web_frontend_user }}' + edx_django_service_with_rendered_config_debian_pkgs_extra: '{{ veda_web_frontend_debian_pkgs }}' + edx_django_service_with_rendered_config_gunicorn_port: '{{ VEDA_WEB_FRONTEND_GUNICORN_PORT }}' + edx_django_service_with_rendered_config_django_settings_module: '{{ VEDA_WEB_FRONTEND_DJANGO_SETTINGS_MODULE }}' + edx_django_service_with_rendered_config_environment_extra: '{{ VEDA_WEB_FRONTEND_ENVIRONMENT }}' + edx_django_service_with_rendered_config_wsgi_name: 'VEDA' + edx_django_service_with_rendered_config_nginx_port: '{{ 
VEDA_WEB_FRONTEND_NGINX_PORT }}' + edx_django_service_with_rendered_config_ssl_nginx_port: '{{ VEDA_WEB_FRONTEND_SSL_NGINX_PORT }}' + edx_django_service_with_rendered_config_secret_key: '{{ VEDA_WEB_FRONTEND_SECRET_KEY }}' + edx_django_service_with_rendered_config_staticfiles_storage: '{{ VEDA_WEB_FRONTEND_STATICFILES_STORAGE }}' + edx_django_service_with_rendered_config_media_storage_backend: '{{ VEDA_WEB_FRONTEND_MEDIA_STORAGE_BACKEND }}' + edx_django_service_with_rendered_config_memcache: '{{ VEDA_WEB_FRONTEND_MEMCACHE }}' + edx_django_service_with_rendered_config_default_db_host: '{{ VEDA_WEB_FRONTEND_MYSQL_HOST }}' + edx_django_service_with_rendered_config_default_db_name: '{{ VEDA_WEB_FRONTEND_DEFAULT_DB_NAME }}' + edx_django_service_with_rendered_config_db_user: '{{ VEDA_WEB_FRONTEND_MYSQL_USER }}' + edx_django_service_with_rendered_config_db_password: '{{ VEDA_WEB_FRONTEND_MYSQL_PASSWORD }}' + edx_django_service_default_db_conn_max_age: '{{ VEDA_WEB_FRONTEND_MYSQL_CONN_MAX_AGE }}' + edx_django_service_with_rendered_config_use_python3: false + edx_django_service_with_rendered_config_service_config: '{{ VEDA_WEB_FRONTEND_SERVICE_CONFIG }}' + edx_django_service_with_rendered_config_post_migrate_commands: '{{ veda_web_frontend_post_migrate_commands }}' diff --git a/playbooks/roles/vhost/tasks/main.yml b/playbooks/roles/vhost/tasks/main.yml index 15220ea42a1..5eec3ce06e6 100644 --- a/playbooks/roles/vhost/tasks/main.yml +++ b/playbooks/roles/vhost/tasks/main.yml @@ -31,6 +31,15 @@ mode: "{{ item.value.mode }}" with_dict: "{{ vhost_dirs }}" +- name: Add script for syncing logs on exit + template: + src: sync-logs-on-exit.j2 + dest: "{{ COMMON_OBJECT_STORE_LOG_SYNC_ON_EXIT }}" + owner: root + group: root + mode: 0744 + when: COMMON_OBJECT_STORE_LOG_SYNC + - name: Force logrotate on supervisor stop template: src: etc/init/sync-on-stop.conf.j2 @@ -39,6 +48,27 @@ group: root mode: 0644 when: COMMON_OBJECT_STORE_LOG_SYNC + and (ansible_distribution_release == 
'precise' or ansible_distribution_release == 'trusty') + +# With systemd, log rotation should be run before the `rsyslog` service exits +# to ensure that all logs are compressed and synced before shutting down the server. +- name: Add systemd service for syncing logs on exit + template: + src: etc/systemd/system/sync-logs-on-exit.service.j2 + dest: /etc/systemd/system/sync-logs-on-exit.service + owner: root + group: root + mode: 0644 + when: COMMON_OBJECT_STORE_LOG_SYNC + and not (ansible_distribution_release == 'precise' or ansible_distribution_release == 'trusty') + +- name: Enable systemd service for syncing logs on exit + systemd: + name: sync-logs-on-exit + enabled: yes + daemon_reload: yes + when: COMMON_OBJECT_STORE_LOG_SYNC + and not (ansible_distribution_release == 'precise' or ansible_distribution_release == 'trusty') - name: Update /etc/dhcp/dhclient.conf template: @@ -84,7 +114,7 @@ line: "PasswordAuthentication {{ COMMON_SSH_PASSWORD_AUTH }}" register: sshd_config_line2 -- name: Restart ssh +- name: Restart ssh (ubuntu/debian) service: name: ssh state: restarted @@ -93,7 +123,7 @@ (sshd_config_line1.changed or sshd_config_line2.changed) and ansible_distribution in common_debian_variants -- name: Restart ssh +- name: Restart ssh (redhat) service: name: sshd state: restarted diff --git a/playbooks/roles/vhost/templates/etc/init/sync-on-stop.conf.j2 b/playbooks/roles/vhost/templates/etc/init/sync-on-stop.conf.j2 index 783417c846f..f3117d47d21 100644 --- a/playbooks/roles/vhost/templates/etc/init/sync-on-stop.conf.j2 +++ b/playbooks/roles/vhost/templates/etc/init/sync-on-stop.conf.j2 @@ -1,6 +1,5 @@ start on stopped supervisor description "sync tracking logs on supervisor shutdown" script - /usr/sbin/logrotate -f /etc/logrotate.d/hourly/tracking.log - /usr/sbin/logrotate -f /etc/logrotate.d/hourly/edx-services + "{{ COMMON_OBJECT_STORE_LOG_SYNC_ON_EXIT }}" end script diff --git 
a/playbooks/roles/vhost/templates/etc/systemd/system/sync-logs-on-exit.service.j2 b/playbooks/roles/vhost/templates/etc/systemd/system/sync-logs-on-exit.service.j2 new file mode 100644 index 00000000000..7e171484104 --- /dev/null +++ b/playbooks/roles/vhost/templates/etc/systemd/system/sync-logs-on-exit.service.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=Synchronizes logs on exit +After=rsyslog.service + +[Service] +Type=oneshot +RemainAfterExit=true +ExecStop=/edx/bin/sync-logs-on-exit + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/roles/vhost/templates/sync-logs-on-exit.j2 b/playbooks/roles/vhost/templates/sync-logs-on-exit.j2 new file mode 100644 index 00000000000..9629744538a --- /dev/null +++ b/playbooks/roles/vhost/templates/sync-logs-on-exit.j2 @@ -0,0 +1,4 @@ +#!/bin/bash + +/usr/sbin/logrotate -f /etc/logrotate.d/hourly/tracking.log +/usr/sbin/logrotate -f /etc/logrotate.d/hourly/edx-services diff --git a/playbooks/roles/video_pipeline_base/defaults/main.yml b/playbooks/roles/video_pipeline_base/defaults/main.yml index 1517e37ee50..52fead31178 100644 --- a/playbooks/roles/video_pipeline_base/defaults/main.yml +++ b/playbooks/roles/video_pipeline_base/defaults/main.yml @@ -69,14 +69,11 @@ VIDEO_PIPELINE_BASE_VEDA_NOREPLY_EMAIL: "veda-noreply@example.com" VIDEO_PIPELINE_BASE_CIELO24_API_ENVIRONMENT: "sandbox" VIDEO_PIPELINE_BASE_TRANSCRIPT_PROVIDER_REQUEST_TOKEN: "SET-ME-TO-A-UNIQUE-LONG-RANDOM-STRING" -VIDEO_PIPELINE_BASE_SOCIAL_AUTH_EDX_OIDC_KEY: "pipeline-key" -VIDEO_PIPELINE_BASE_SOCIAL_AUTH_EDX_OIDC_SECRET: "pipeline-secret" - VIDEO_PIPELINE_BASE_VAL_API_URL: "{{ video_pipeline_base_val_base_url }}/api/val/v0/videos" VIDEO_PIPELINE_BASE_VAL_TOKEN_URL: "{{ video_pipeline_base_val_base_url }}/oauth2/access_token" VIDEO_PIPELINE_BASE_VAL_VIDEO_IMAGES_URL: "{{ video_pipeline_base_val_base_url }}/api/val/v0/videos/video-images/update/" -VIDEO_PIPELINE_BASE_VAL_CLIENT_ID: "{{ VIDEO_PIPELINE_BASE_SOCIAL_AUTH_EDX_OIDC_KEY }}" 
-VIDEO_PIPELINE_BASE_VAL_SECRET_KEY: "{{ VIDEO_PIPELINE_BASE_SOCIAL_AUTH_EDX_OIDC_SECRET }}" +VIDEO_PIPELINE_BASE_VAL_CLIENT_ID: "{{ VIDEO_PIPELINE_BASE_OAUTH_CLIENT_ID }}" +VIDEO_PIPELINE_BASE_VAL_SECRET_KEY: "{{ VIDEO_PIPELINE_BASE_OAUTH_CLIENT_SECRET }}" VIDEO_PIPELINE_BASE_VAL_USERNAME: "staff" VIDEO_PIPELINE_BASE_VAL_PASSWORD: "edx" VIDEO_PIPELINE_BASE_VAL_TRANSCRIPT_CREATE_URL: "{{ video_pipeline_base_val_base_url }}/api/val/v0/videos/video-transcripts/create/" diff --git a/playbooks/roles/whitelabel/tasks/main.yml b/playbooks/roles/whitelabel/tasks/main.yml index 82ea4679d29..a1484737db2 100644 --- a/playbooks/roles/whitelabel/tasks/main.yml +++ b/playbooks/roles/whitelabel/tasks/main.yml @@ -36,15 +36,3 @@ become_user: "{{ ecommerce_user }}" environment: "{{ ecommerce_environment }}" -# Journals - -- name: Create Sites and Partners for Journals - shell: > - {{ COMMON_BIN_DIR }}/python.journals {{ COMMON_BIN_DIR }}/manage.journals create_themed_sites - --settings={{ JOURNALS_DJANGO_SETTINGS_MODULE }} - --dns-name {{ dns_name }} - --theme-path {{ WHITELABEL_THEME_DIR }} - --port "80" - become_user: "{{ journals_user }}" - environment: "{{ journals_environment }}" - when: "{{ JOURNALS_ENABLED }}" diff --git a/playbooks/roles/xqueue/defaults/main.yml b/playbooks/roles/xqueue/defaults/main.yml index 29c63b9bf23..0c31098fe82 100644 --- a/playbooks/roles/xqueue/defaults/main.yml +++ b/playbooks/roles/xqueue/defaults/main.yml @@ -4,6 +4,7 @@ XQUEUE_SETTINGS: 'production' XQUEUE_NGINX_PORT: 18040 +XQUEUE_NGINX_SSL_PORT: 18041 XQUEUE_GUNICORN_WORKERS_EXTRA: "" XQUEUE_GUNICORN_WORKERS_EXTRA_CONF: "" @@ -44,7 +45,7 @@ XQUEUE_SWIFT_REGION_NAME: !!null XQUEUE_BASIC_AUTH_USER: "{{ COMMON_HTPASSWD_USER }}" XQUEUE_BASIC_AUTH_PASSWORD: "{{ COMMON_HTPASSWD_PASS }}" XQUEUE_DJANGO_USERS: - lms: 'password' + lms: "{{ COMMON_XQUEUE_LMS_PASSWORD }}" XQUEUE_LANG: 'en_US.UTF-8' XQUEUE_MYSQL_DB_NAME: 'xqueue' @@ -76,6 +77,9 @@ XQUEUE_SESSION_COOKIE_SECURE: false # you a custom 
setting. XQUEUE_CLOUDWATCH_QUEUE_COUNT_METRICS: !!null +# Remote config +XQUEUE_HERMES_ENABLED: "{{ COMMON_HERMES_ENABLED }}" + # This block of config is dropped into /edx/etc/xqueue.yml # and is read in by xqueue.XQUEUE_SETTINGS XQUEUE_CONFIG: @@ -169,7 +173,14 @@ xqueue_debian_pkgs: # (only needed if wheel files aren't available) - build-essential - s3cmd + # mysqlclient wont install without this + - libssl-dev - libmysqlclient-dev - ntp # Needed to be able to create the xqueue mysqldb. - python-mysqldb + # Needed to be able to build wheel for mysqlclient + - python3-dev + +# flag to run xqueue on python3 +xqueue_use_python3: true diff --git a/playbooks/roles/xqueue/meta/main.yml b/playbooks/roles/xqueue/meta/main.yml index 349934b357b..267c1460798 100644 --- a/playbooks/roles/xqueue/meta/main.yml +++ b/playbooks/roles/xqueue/meta/main.yml @@ -2,13 +2,20 @@ dependencies: - common - supervisor - - role: edx_service - edx_service_name: "{{ xqueue_service_name }}" - edx_service_config: "{{ XQUEUE_CONFIG }}" - edx_service_config_filter_nones: true - edx_service_repos: "{{ XQUEUE_REPOS }}" - edx_service_user: "{{ xqueue_user }}" - edx_service_home: "{{ xqueue_home }}" - edx_service_packages: + - role: supervisor + supervisor_spec: + - service: "{{ xqueue_service_name }}" + migration_check_services: "{{ xqueue_service_name }}" + python: "python.{{ xqueue_service_name }}" + code: "{{ xqueue_code_dir | default(None) }}" + env: "{{ xqueue_app_dir | default(none) }}/xqueue_env" + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ xqueue_service_name }}" + edx_service_with_rendered_config_service_config: "{{ XQUEUE_CONFIG }}" + edx_service_with_rendered_config_filter_nones: true + edx_service_with_rendered_config_repos: "{{ XQUEUE_REPOS }}" + edx_service_with_rendered_config_user: "{{ xqueue_user }}" + edx_service_with_rendered_config_home: "{{ xqueue_home }}" + edx_service_with_rendered_config_packages: debian: "{{ 
xqueue_debian_pkgs }}" redhat: [] diff --git a/playbooks/roles/xqueue/tasks/main.yml b/playbooks/roles/xqueue/tasks/main.yml index c4e0964fe90..93c597331ac 100644 --- a/playbooks/roles/xqueue/tasks/main.yml +++ b/playbooks/roles/xqueue/tasks/main.yml @@ -1,4 +1,51 @@ --- +######## BEGIN PYTHON3 ######## +- name: install python3 + apt: + name: "{{ item }}" + when: xqueue_use_python3 + with_items: + - python3-pip + - python3-dev + tags: + - install + - install:system-requirements + +- name: build virtualenv with python3 + command: "virtualenv --python=python3 {{ xqueue_venv_dir }}" + args: + creates: "{{ xqueue_venv_dir }}/bin/pip" + become_user: "{{ xqueue_user }}" + when: xqueue_use_python3 + tags: + - install + - install:system-requirements + +- name: "Install python3 requirements" + pip: + requirements: "{{ xqueue_requirements_file }}" + virtualenv: "{{ xqueue_venv_dir }}" + virtualenv_python: 'python3.5' + state: present + extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w" + become_user: "{{ xqueue_user }}" + when: xqueue_use_python3 + tags: + - install + - install:app-requirements + +######## END PYTHON3 ######## + +- name: build virtualenv with python2.7 + command: "virtualenv --python=python2.7 {{ xqueue_venv_dir }}" + args: + creates: "{{ xqueue_venv_dir }}/bin/pip" + become_user: "{{ xqueue_user }}" + when: not xqueue_use_python3 + tags: + - install + - install:system-requirements + - name: write devstack script template: src: "devstack.sh.j2" @@ -70,6 +117,7 @@ state: present extra_args: "-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w" become_user: "{{ xqueue_user }}" + when: not xqueue_use_python3 tags: - install - install:app-requirements diff --git a/playbooks/roles/xqueue/templates/devstack.sh.j2 b/playbooks/roles/xqueue/templates/devstack.sh.j2 index 07d340fffed..31b6eb4b9b3 100644 --- a/playbooks/roles/xqueue/templates/devstack.sh.j2 +++ b/playbooks/roles/xqueue/templates/devstack.sh.j2 @@ -5,10 +5,24 @@ COMMAND=$1 case $COMMAND in + 
start) + /edx/app/supervisor/venvs/supervisor/bin/supervisord -n --configuration /edx/app/supervisor/supervisord.conf + ;; open) . {{ xqueue_venv_bin }}/activate cd {{ xqueue_code_dir }} /bin/bash ;; + exec) + shift + + . {{ xqueue_venv_bin }}/activate + cd {{ xqueue_code_dir }} + + "$@" + ;; + *) + "$@" + ;; esac diff --git a/playbooks/roles/xqueue/templates/xqueue_gunicorn.py.j2 b/playbooks/roles/xqueue/templates/xqueue_gunicorn.py.j2 index c78b66a69fe..7802ee62d56 100644 --- a/playbooks/roles/xqueue/templates/xqueue_gunicorn.py.j2 +++ b/playbooks/roles/xqueue/templates/xqueue_gunicorn.py.j2 @@ -16,6 +16,8 @@ workers = {{ XQUEUE_WORKERS }} workers = (multiprocessing.cpu_count()-1) * 2 + 2 {% endif %} +{{ common_pre_request }} + {{ common_close_all_caches }} def post_fork(server, worker): diff --git a/playbooks/roles/xqwatcher/defaults/main.yml b/playbooks/roles/xqwatcher/defaults/main.yml index 7029a119635..a2f6fe34a41 100644 --- a/playbooks/roles/xqwatcher/defaults/main.yml +++ b/playbooks/roles/xqwatcher/defaults/main.yml @@ -74,6 +74,7 @@ XQWATCHER_REPOS: # and a key being provided via NEWRELIC_LICENSE_KEY XQWATCHER_NEWRELIC_APPNAME: "{{ COMMON_DEPLOYMENT }}-{{ COMMON_ENVIRONMENT }}-xqwatcher" XQWATCHER_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}" + # # # vars are namespace with the module name. 
@@ -89,9 +90,6 @@ xqwatcher_repo_name: xqueue-watcher.git #TODO: change this to /edx/etc after pulling xqwatcher.json out xqwatcher_conf_dir: "{{ xqwatcher_app_dir }}" -#TODO: remove after refactoring out all the git stuff -xqwatcher_course_git_ssh_opts: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ xqwatcher_app_dir }}/.ssh/{{ xqwatcher_service_name }}-courses" - xqwatcher_requirements_file: "{{ xqwatcher_code_dir }}/requirements/production.txt" xqwatcher_log_dir: "{{ COMMON_LOG_DIR }}/{{ xqwatcher_service_name }}" xqwatcher_module: "xqueue_watcher" diff --git a/playbooks/roles/xqwatcher/meta/main.yml b/playbooks/roles/xqwatcher/meta/main.yml index 6361854e63a..1c8f0b9d238 100644 --- a/playbooks/roles/xqwatcher/meta/main.yml +++ b/playbooks/roles/xqwatcher/meta/main.yml @@ -15,11 +15,11 @@ dependencies: - common - role: supervisor - - role: edx_service - edx_service_name: "{{ xqwatcher_service_name }}" - edx_service_repos: "{{ XQWATCHER_REPOS }}" - edx_service_user: "{{ xqwatcher_user }}" - edx_service_home: "{{ xqwatcher_app_dir }}" - edx_service_packages: + - role: edx_service_with_rendered_config + edx_service_with_rendered_config_service_name: "{{ xqwatcher_service_name }}" + edx_service_with_rendered_config_repos: "{{ XQWATCHER_REPOS }}" + edx_service_with_rendered_config_user: "{{ xqwatcher_user }}" + edx_service_with_rendered_config_home: "{{ xqwatcher_app_dir }}" + edx_service_with_rendered_config_packages: debian: "{{ xqwatcher_debian_pkgs }}" redhat: "{{ xqwatcher_redhat_pkgs }}" diff --git a/playbooks/roles/xqwatcher/tasks/deploy_courses.yml b/playbooks/roles/xqwatcher/tasks/deploy_courses.yml index 45fa7d1edf4..5fc741610a6 100644 --- a/playbooks/roles/xqwatcher/tasks/deploy_courses.yml +++ b/playbooks/roles/xqwatcher/tasks/deploy_courses.yml @@ -7,7 +7,8 @@ repo: "{{ item.GIT_REPO }}" dest: "{{ xqwatcher_app_dir }}/data/{{ item.COURSE }}" version: "{{ item.GIT_REF }}" - ssh_opts: "{{ xqwatcher_course_git_ssh_opts }}" + 
accept_hostkey: yes + key_file: "{{ xqwatcher_app_dir }}/.ssh/{{ xqwatcher_service_name }}-courses" with_items: "{{ XQWATCHER_COURSES }}" tags: - install diff --git a/playbooks/roles/xserver/defaults/main.yml b/playbooks/roles/xserver/defaults/main.yml index d2b082ea690..aeefd0503b3 100644 --- a/playbooks/roles/xserver/defaults/main.yml +++ b/playbooks/roles/xserver/defaults/main.yml @@ -43,11 +43,13 @@ xserver_env_config: xserver_source_repo: "git://github.com/edx/xserver.git" # This should probably be overridden in the playbook or groupvars # with the default pointing to the head of master. -xserver_version: master -xserver_grader_version: master +XSERVER_VERSION: master +xserver_version: "{{ XSERVER_VERSION }}" +XSERVER_GRADER_VERSION: master +xserver_grader_version: "{{ XSERVER_GRADER_VERSION }}" -xserver_requirements_file: "{{ xserver_code_dir }}/requirements.txt" +xserver_requirements_file: "{{ xserver_code_dir }}/requirements/base.txt" xserver_gunicorn_port: 8050 xserver_gunicorn_host: 'localhost' diff --git a/playbooks/roles/xserver/tasks/deploy.yml b/playbooks/roles/xserver/tasks/deploy.yml index 661db23d125..7f288939539 100644 --- a/playbooks/roles/xserver/tasks/deploy.yml +++ b/playbooks/roles/xserver/tasks/deploy.yml @@ -62,7 +62,8 @@ template: src: xserver.env.json.j2 dest: "{{ xserver_app_dir }}/env.json" - become_user: "{{ xserver_user }}" + owner: "{{ xserver_user }}" + group: "{{ common_web_group }}" notify: - restart xserver @@ -76,24 +77,14 @@ notify: - restart xserver -- name: Upload ssh script - template: - src: git_ssh.sh.j2 - dest: "/tmp/git_ssh.sh" - owner: "{{ xserver_user }}" - mode: "0750" - notify: - - restart xserver - - name: Checkout grader code git: dest: "{{ XSERVER_GRADER_DIR }}" repo: "{{ XSERVER_GRADER_SOURCE }}" version: "{{ xserver_grader_version }}" accept_hostkey: yes - environment: - GIT_SSH: "/tmp/git_ssh.sh" - notify: + key_file: "{{ xserver_git_identity }}" + notify: - restart xserver register: xserver_grader_checkout 
become_user: "{{ xserver_user }}" diff --git a/playbooks/roles/xserver/tasks/ec2.yml b/playbooks/roles/xserver/tasks/ec2.yml index 3c1efd3018a..3af6a7c1cd9 100644 --- a/playbooks/roles/xserver/tasks/ec2.yml +++ b/playbooks/roles/xserver/tasks/ec2.yml @@ -1,6 +1,6 @@ --- - name: Get instance information - action: ec2_facts + action: ec2_metadata_facts - name: Tag instance for xserver ec2_tag: diff --git a/playbooks/roles/xserver/tasks/main.yml b/playbooks/roles/xserver/tasks/main.yml index 03dff21658e..88accff9af2 100644 --- a/playbooks/roles/xserver/tasks/main.yml +++ b/playbooks/roles/xserver/tasks/main.yml @@ -51,9 +51,8 @@ - name: Install system dependencies of xserver apt: - name: "{{ item }}" + name: "{{ xserver_debian_pkgs }}" state: present - with_items: "{{ xserver_debian_pkgs }}" - name: Load python-sandbox apparmor profile template: diff --git a/playbooks/roles/xserver/templates/git_ssh.sh.j2 b/playbooks/roles/xserver/templates/git_ssh.sh.j2 deleted file mode 100644 index cc93d2f3462..00000000000 --- a/playbooks/roles/xserver/templates/git_ssh.sh.j2 +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec /usr/bin/ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ xserver_git_identity }} "$@" diff --git a/playbooks/roles/xserver/templates/xserver.yml.j2 b/playbooks/roles/xserver/templates/xserver.yml.j2 new file mode 100644 index 00000000000..150b4354884 --- /dev/null +++ b/playbooks/roles/xserver/templates/xserver.yml.j2 @@ -0,0 +1 @@ +{{ xserver_env_config | to_nice_yaml }} diff --git a/playbooks/sample_vars/passwords.yml b/playbooks/sample_vars/passwords.yml index 8ffcc5fbf91..7427d777e4d 100644 --- a/playbooks/sample_vars/passwords.yml +++ b/playbooks/sample_vars/passwords.yml @@ -8,10 +8,13 @@ ANALYTICS_PIPELINE_OUTPUT_DATABASE_PASSWORD: !!null ANALYTICS_SCHEDULE_MASTER_SSH_CREDENTIAL_PASSPHRASE: !!null COMMON_HTPASSWD_PASS: !!null COMMON_HTPASSWD_USER: !!null +PROSPECTUS_PREVIEW_COMMON_HTPASSWD_PASS: !!null 
+PROSPECTUS_PREVIEW_COMMON_HTPASSWD_USER: !!null COMMON_MONGO_READ_ONLY_PASS: !!null COMMON_MYSQL_ADMIN_PASS: !!null COMMON_MYSQL_MIGRATE_PASS: !!null COMMON_MYSQL_READ_ONLY_PASS: !!null +COMMON_ANALYTICS_MYSQL_READ_ONLY_PASS: !!null CREDENTIALS_MYSQL_PASSWORD: !!null DISCOVERY_EMAIL_HOST_PASSWORD: !!null DISCOVERY_MYSQL_PASSWORD: !!null @@ -88,3 +91,14 @@ RABBIT_USERS: password: "{{ XQUEUE_RABBITMQ_PASS }}" - name: 'celery' password: "{{ EDXAPP_CELERY_PASSWORD }}" + +# JWT-related settings +COMMON_JWT_AUDIENCE: !!null +COMMON_JWT_SECRET_KEY: !!null #SECRET_KEY +ECOMMERCE_WORKER_JWT_SECRET_KEY: !!null #SECRET_KEY + +FERNET_KEYS: +- !!null #SECRET_KEY +EDXAPP_RETIRED_USER_SALTS: +- !!null +COMMON_XQUEUE_LMS_PASSWORD: !!null diff --git a/playbooks/sample_vars/server_vars.yml b/playbooks/sample_vars/server_vars.yml index 40e95875311..764ee6e9b3b 100644 --- a/playbooks/sample_vars/server_vars.yml +++ b/playbooks/sample_vars/server_vars.yml @@ -91,7 +91,6 @@ #EDXAPP_YOUTUBE_API_KEY: "Your Youtube API Key" # #EDXAPP_FEATURES: -# AUTH_USE_OPENID_PROVIDER: true # ENABLE_DISCUSSION_SERVICE: true # ENABLE_DISCUSSION_HOME_PANEL: true # ENABLE_INSTRUCTOR_ANALYTICS: false @@ -100,6 +99,7 @@ # PREVIEW_LMS_BASE: "{{ EDXAPP_PREVIEW_LMS_BASE }}" # ENABLE_GRADE_DOWNLOADS: true # ENABLE_MKTG_SITE: "{{ EDXAPP_ENABLE_MKTG_SITE }}" +# ENABLE_PUBLISHER: "{{ EDXAPP_ENABLE_PUBLISHER }}" # AUTOMATIC_AUTH_FOR_TESTING: "{{ EDXAPP_ENABLE_AUTO_AUTH }}" # ENABLE_THIRD_PARTY_AUTH: "{{ EDXAPP_ENABLE_THIRD_PARTY_AUTH }}" # AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING: true @@ -120,6 +120,11 @@ # - "www.example.org" # - "{{ ECOMMERCE_ECOMMERCE_URL_ROOT }}" # +#EDXAPP_LOGIN_REDIRECT_WHITELIST: +# - "lms.example.org" +# - "studio.example.org" +# - "insights.example.org" +# #EDXAPP_VIDEO_UPLOAD_PIPELINE: # BUCKET: "your-video-bucket" # ROOT_PATH: "edx-video-upload-pipeline/unprocessed" diff --git a/playbooks/snort.yml b/playbooks/snort.yml index da98f718dd1..e2b75b32974 100644 --- 
a/playbooks/snort.yml +++ b/playbooks/snort.yml @@ -6,7 +6,8 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - snort - role: datadog when: COMMON_ENABLE_DATADOG diff --git a/playbooks/splunk.yml b/playbooks/splunk.yml index 18e5bf706ad..a4d149313ab 100644 --- a/playbooks/splunk.yml +++ b/playbooks/splunk.yml @@ -4,6 +4,5 @@ hosts: all become: True gather_facts: True - vars: roles: - splunk-server diff --git a/playbooks/splunk_config_backup.yml b/playbooks/splunk_config_backup.yml index 6a05a937ab5..0345b13792c 100644 --- a/playbooks/splunk_config_backup.yml +++ b/playbooks/splunk_config_backup.yml @@ -1,20 +1,19 @@ - name: Backup splunk configurations hosts: all - become: True vars: splunk_config_dir: "/opt/splunk/etc" splunk_host: "{{ splunk_host_id }}" splunk_config_archive: "{{ splunk_host }}-{{ date }}.tar.gz" - splunk_s3_backup_tempdir: "/var/tmp" + splunk_s3_backup_tempdir: "{{ splunk_backup_dir }}" splunk_s3_bucket: "{{ splunk_s3_backups_bucket }}" tasks: - set_fact: date: "{{ lookup('pipe', 'date +%Y-%m-%dT%H%M') }}" - name: archive splunk configuration dir - command: tar -cpzf "{{ splunk_s3_backup_tempdir }}/{{ splunk_config_archive }}" {{ splunk_config_dir }} + command: sudo tar -cpzf "{{ splunk_s3_backup_tempdir }}/{{ splunk_config_archive }}" {{ splunk_config_dir }} + register: tar_result + failed_when: tar_result.rc > 1 - name: copy tarball to s3 bucket - command: aws s3 cp "{{ splunk_s3_backup_tempdir }}/{{ splunk_config_archive }}" s3://{{ splunk_s3_bucket }} + command: sudo aws s3 cp "{{ splunk_s3_backup_tempdir }}/{{ splunk_config_archive }}" s3://{{ splunk_s3_bucket }} - name: cleanup backup file - file: - path: "{{ splunk_s3_backup_tempdir }}/{{ splunk_config_archive }}" - state: absent + shell: sudo rm "{{ splunk_s3_backup_tempdir }}/{{ splunk_config_archive }}" diff --git a/playbooks/tanaguru.yml b/playbooks/tanaguru.yml index f3ff6f3d356..2855a650150 100644 --- 
a/playbooks/tanaguru.yml +++ b/playbooks/tanaguru.yml @@ -3,6 +3,7 @@ become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - mysql - tanaguru diff --git a/playbooks/test-rolling.yml b/playbooks/test-rolling.yml index 9e742ca4354..a07f2a285fc 100644 --- a/playbooks/test-rolling.yml +++ b/playbooks/test-rolling.yml @@ -9,7 +9,7 @@ serial_count: 1 serial: "{{ serial_count }}" pre_tasks: - - action: ec2_facts + - action: ec2_metadata_facts when: elb_pre_post - debug: var: "{{ ansible_ec2_instance_id }}" diff --git a/playbooks/tools-gp.yml b/playbooks/tools-gp.yml index a9165d3b53c..d3338a6b93d 100644 --- a/playbooks/tools-gp.yml +++ b/playbooks/tools-gp.yml @@ -10,7 +10,9 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - ad_hoc_reporting + - ghost - role: splunkforwarder when: COMMON_ENABLE_SPLUNKFORWARDER diff --git a/playbooks/tools_jenkins.yml b/playbooks/tools_jenkins.yml index 142227c92b4..57848d95a7d 100644 --- a/playbooks/tools_jenkins.yml +++ b/playbooks/tools_jenkins.yml @@ -17,7 +17,8 @@ SECURITY_UPGRADE_ON_ANSIBLE: true serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE # jenkins_master role does extra tweaks to datadog if COMMON_ENABLE_DATADOG is set # so this needs to run early. 
- role: datadog diff --git a/playbooks/users.yml b/playbooks/users.yml new file mode 100644 index 00000000000..7f7e05180a5 --- /dev/null +++ b/playbooks/users.yml @@ -0,0 +1,12 @@ +# Simple playbook for creating/updating/removing users on a box +# If you run it against a box with automated users and don't pass them in it will break them +# ansible-playbook -i 'host.example.com,' ./tools-gp.yml -e@/var/path/users.yml -e@/vars/path/environnment-deployment.yml +- name: Update users + hosts: all + become: True + gather_facts: True + roles: + - role: user + user_info: "{{ COMMON_USER_INFO }}" + tags: + - users diff --git a/playbooks/veda_delivery_worker.yml b/playbooks/veda_delivery_worker.yml index 015a0b432e7..e6b2bb9523b 100644 --- a/playbooks/veda_delivery_worker.yml +++ b/playbooks/veda_delivery_worker.yml @@ -2,7 +2,8 @@ hosts: all gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - veda_delivery_worker - role: splunkforwarder when: COMMON_ENABLE_SPLUNKFORWARDER diff --git a/playbooks/veda_encode_worker.yml b/playbooks/veda_encode_worker.yml index 6fa3f3e3e75..c9e5aea08e9 100644 --- a/playbooks/veda_encode_worker.yml +++ b/playbooks/veda_encode_worker.yml @@ -3,7 +3,8 @@ become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - veda_encode_worker - role: splunkforwarder when: COMMON_ENABLE_SPLUNKFORWARDER diff --git a/playbooks/veda_pipeline_worker.yml b/playbooks/veda_pipeline_worker.yml index ec2903d3ff4..d370d6b5dd0 100644 --- a/playbooks/veda_pipeline_worker.yml +++ b/playbooks/veda_pipeline_worker.yml @@ -3,7 +3,8 @@ become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - veda_pipeline_worker - role: splunkforwarder when: COMMON_ENABLE_SPLUNKFORWARDER diff --git a/playbooks/veda_web_frontend.yml b/playbooks/veda_web_frontend.yml index 25e8781e019..0b2be285611 100644 --- a/playbooks/veda_web_frontend.yml +++ b/playbooks/veda_web_frontend.yml @@ -3,7 +3,8 @@ 
become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_default_sites: - veda_web_frontend diff --git a/playbooks/vpc_admin.yml b/playbooks/vpc_admin.yml index 05a843bcec8..be7360d32ad 100644 --- a/playbooks/vpc_admin.yml +++ b/playbooks/vpc_admin.yml @@ -5,7 +5,8 @@ become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - edx_ansible - user - jenkins_admin diff --git a/playbooks/whitelabel.yml b/playbooks/whitelabel.yml index a5a62a69804..d693c4a4585 100644 --- a/playbooks/whitelabel.yml +++ b/playbooks/whitelabel.yml @@ -10,9 +10,9 @@ - "roles/edxapp/defaults/main.yml" - "roles/discovery/defaults/main.yml" - "roles/ecommerce/defaults/main.yml" - - "roles/journals/defaults/main.yml" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - edxlocal - role: nginx nginx_default_sites: diff --git a/playbooks/worker.yml b/playbooks/worker.yml index ae203c9bd2d..ba7f5636739 100644 --- a/playbooks/worker.yml +++ b/playbooks/worker.yml @@ -3,7 +3,8 @@ become: True gather_facts: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: edxapp celery_worker: True - role: datadog @@ -14,5 +15,10 @@ when: COMMON_ENABLE_MINOS - role: datadog-uninstall when: not COMMON_ENABLE_DATADOG - - + - role: jwt_signature + when: CONFIGURE_JWTS + app_name: lms + app_config_file: "{{ COMMON_CFG_DIR }}/lms.yml" + app_config_owner: "{{ edxapp_user }}" + app_config_group: "{{ common_web_group }}" + app_config_mode: 0640 diff --git a/playbooks/xqueue.yml b/playbooks/xqueue.yml index de2c565155d..86c3213dd1d 100644 --- a/playbooks/xqueue.yml +++ b/playbooks/xqueue.yml @@ -2,7 +2,8 @@ hosts: all become: True roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: automated AUTOMATED_USERS: "{{ XQUEUE_AUTOMATED_USERS | default({}) }}" - role: nginx @@ -17,4 +18,7 @@ when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE - role: datadog-uninstall when: not COMMON_ENABLE_DATADOG + - 
role: hermes + HERMES_TARGET_SERVICE: 'xqueue' + when: XQUEUE_HERMES_ENABLED diff --git a/playbooks/xqwatcher.yml b/playbooks/xqwatcher.yml index 57c1461c38a..8798dda77cc 100644 --- a/playbooks/xqwatcher.yml +++ b/playbooks/xqwatcher.yml @@ -10,7 +10,8 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - xqwatcher - role: datadog when: COMMON_ENABLE_DATADOG @@ -18,4 +19,3 @@ when: COMMON_ENABLE_SPLUNKFORWARDER - role: datadog-uninstall when: not COMMON_ENABLE_DATADOG - diff --git a/playbooks/xserver.yml b/playbooks/xserver.yml index ba50766e418..327ab1eadb7 100644 --- a/playbooks/xserver.yml +++ b/playbooks/xserver.yml @@ -6,7 +6,8 @@ serial_count: 1 serial: "{{ serial_count }}" roles: - - aws + - role: aws + when: COMMON_ENABLE_AWS_ROLE - role: nginx nginx_sites: - xserver diff --git a/pre-requirements.txt b/pre-requirements.txt index a8dbe716d86..9e1ff379964 100644 --- a/pre-requirements.txt +++ b/pre-requirements.txt @@ -1 +1 @@ -pip==9.0.3 +pip==20.0.2 diff --git a/requirements.txt b/requirements.txt index ca977f46464..b4f055351f4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,50 +4,43 @@ # # make upgrade # -ansible==2.3.1.0 -asn1crypto==0.24.0 # via cryptography -awscli==1.15.19 -bcrypt==3.1.4 # via paramiko -boto3==1.7.14 -boto==2.48.0 +ansible==2.7.12 # via -r requirements/base.in +awscli==1.15.19 # via -r requirements/base.in +bcrypt==3.1.7 # via paramiko +boto3==1.7.14 # via -r requirements/base.in +boto==2.48.0 # via -r requirements/base.in botocore==1.10.19 # via awscli, boto3, s3transfer -certifi==2018.4.16 # via requests -cffi==1.11.5 # via bcrypt, cryptography, pynacl +certifi==2020.4.5.1 # via requests +cffi==1.14.0 # via bcrypt, cryptography, pynacl chardet==3.0.4 # via requests colorama==0.3.7 # via awscli -cryptography==2.2.2 # via paramiko -datadog==0.8.0 -decorator==4.3.0 # via datadog, networkx -docopt==0.6.2 -docutils==0.14 # via awscli, botocore -ecdsa==0.13 -enum34==1.1.6 # 
via cryptography -futures==3.2.0 ; python_version == "2.7" -idna==2.6 # via cryptography, requests -ipaddress==1.0.22 # via cryptography -jinja2==2.8 -jmespath==0.9.3 # via boto3, botocore -markupsafe==1.0 -mysql-python==1.2.5 -networkx==1.11 -paramiko==2.4.1 -pathlib2==2.3.0 -prettytable==0.7.2 -pyasn1==0.4.2 # via paramiko, rsa -pycparser==2.18 # via cffi -pycrypto==2.6.1 -pymongo==3.2.2 -pynacl==1.2.1 # via paramiko -python-dateutil==2.7.3 # via botocore -python-simple-hipchat==0.2 -pyyaml==3.12 -requests==2.18.4 +cryptography==2.9.2 # via ansible, paramiko +datadog==0.8.0 # via -r requirements/base.in +decorator==4.4.2 # via datadog, networkx +docopt==0.6.2 # via -r requirements/base.in +docutils==0.16 # via awscli, botocore +ecdsa==0.13.3 # via -r requirements/base.in +idna==2.7 # via requests +jinja2==2.8 # via -r requirements/base.in, ansible +jmespath==0.9.5 # via boto3, botocore +markupsafe==1.1.1 # via jinja2 +mysqlclient==1.3.0 # via -r requirements/base.in +networkx==1.11 # via -r requirements/base.in +paramiko==2.4.2 # via -r requirements/base.in, ansible +pathlib2==2.3.0 # via -r requirements/base.in +prettytable==0.7.2 # via -r requirements/base.in +pyasn1==0.4.8 # via paramiko, rsa +pycparser==2.20 # via cffi +pycrypto==2.6.1 # via -r requirements/base.in +pymongo==3.9.0 # via -r requirements/base.in +pynacl==1.3.0 # via paramiko +python-dateutil==2.8.1 # via botocore +pyyaml==3.12 # via -r requirements/base.in, ansible, awscli +requests==2.20.0 # via -r requirements/base.in, datadog rsa==3.4.2 # via awscli s3transfer==0.1.13 # via awscli, boto3 -scandir==1.7 # via pathlib2 -simplejson==3.15.0 # via datadog -six==1.11.0 # via bcrypt, cryptography, pathlib2, pynacl, python-dateutil -urllib3==1.22 # via requests -wsgiref==0.1.2 -# lock to this version to avoid https://github.com/ansible/ansible/issues/20492 -docker-compose==1.9.0 +six==1.14.0 # via bcrypt, cryptography, pathlib2, pynacl, python-dateutil +urllib3==1.24.3 # via requests + +# The 
following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/requirements/asym-crypto-yaml.txt b/requirements/asym-crypto-yaml.txt new file mode 100644 index 00000000000..8eb9ae77f1e --- /dev/null +++ b/requirements/asym-crypto-yaml.txt @@ -0,0 +1 @@ +asym-crypto-yaml diff --git a/requirements/aws.in b/requirements/aws.in index eec8a15dbef..b3bcd6a2ead 100644 --- a/requirements/aws.in +++ b/requirements/aws.in @@ -1,6 +1,9 @@ # Python dependencies for the aws role -awscli==1.11.58 -boto==2.48.0 +awscli +boto +boto3 futures ; python_version == "2.7" # via s3transfer -s3cmd==1.6.1 +s3cmd +# Lock pyYaml to what's installed on Ubuntu +pyyaml==3.11 diff --git a/requirements/base.in b/requirements/base.in index 35f61556291..a035dc082d9 100644 --- a/requirements/base.in +++ b/requirements/base.in @@ -1,23 +1,19 @@ # Standard dependencies for Ansible runs -ansible==2.3.1.0 +ansible==2.7.12 awscli==1.15.19 boto==2.48.0 boto3==1.7.14 datadog==0.8.0 docopt==0.6.2 -ecdsa==0.13 -futures ; python_version == "2.7" # via s3transfer +ecdsa==0.13.3 Jinja2==2.8 -MarkupSafe==1.0 -MySQL-python==1.2.5 # Needed for the mysql_db module +mysqlclient==1.3.0 # Needed for the mysql_db module networkx==1.11 -paramiko==2.4.1 +paramiko==2.4.2 pathlib2==2.3.0 prettytable==0.7.2 pycrypto==2.6.1 -pymongo==3.2.2 # Needed for the mongo_* modules (playbooks/library/mongo_*) -python-simple-hipchat==0.2 +pymongo==3.9.0 # Needed for the mongo_* modules (playbooks/library/mongo_*) PyYAML==3.12 -requests==2.18.4 -wsgiref==0.1.2 +requests==2.20.0 diff --git a/requirements/celery_progress.in b/requirements/celery_progress.in new file mode 100644 index 00000000000..4da3832dc96 --- /dev/null +++ b/requirements/celery_progress.in @@ -0,0 +1,10 @@ +# Requirements for the celery/redis monitoring script in util/jenkins + +awscli==1.14.32 +backoff==1.4.3 +boto3==1.5.4 +click==6.7 +futures ; python_version == "2.7" # via s3transfer +redis==2.10.6 +opsgenie-sdk==0.3.1 
+celery==3.1.25 diff --git a/requirements/cloudflare.in b/requirements/cloudflare.in index dd233f5f087..d4e6108020c 100644 --- a/requirements/cloudflare.in +++ b/requirements/cloudflare.in @@ -1,3 +1,3 @@ # Needed for CloudFlare cache hit rate job in util/jenkins -requests==2.9.1 +requests==2.20.0 diff --git a/requirements/pingdom.in b/requirements/pingdom.in index bbdae4ab6c7..960ea707771 100644 --- a/requirements/pingdom.in +++ b/requirements/pingdom.in @@ -2,4 +2,5 @@ click==6.7 PyYAML==3.12 -requests==2.18.4 +requests==2.20.0 +six==1.14.0 diff --git a/requirements/pip-tools.txt b/requirements/pip-tools.txt index e7027f3fb21..60cd1f61362 100644 --- a/requirements/pip-tools.txt +++ b/requirements/pip-tools.txt @@ -4,7 +4,9 @@ # # make upgrade # -click==6.7 # via pip-tools -first==2.0.1 # via pip-tools -pip-tools==2.0.2 -six==1.11.0 # via pip-tools +click==7.1.2 # via pip-tools +pip-tools==5.1.0 # via -r requirements/pip-tools.in +six==1.14.0 # via pip-tools + +# The following packages are considered to be unsafe in a requirements file: +# pip diff --git a/requirements/ses-limits.in b/requirements/ses-limits.in deleted file mode 100644 index a11d6860813..00000000000 --- a/requirements/ses-limits.in +++ /dev/null @@ -1,5 +0,0 @@ -# Requirements for the SES limits check job - -awscli==1.15.19 -boto3==1.7.14 -futures ; python_version == "2.7" # via s3transfer diff --git a/requirements/vpc-tools.in b/requirements/vpc-tools.in index 7a29f5f9062..c95980083d1 100644 --- a/requirements/vpc-tools.in +++ b/requirements/vpc-tools.in @@ -2,5 +2,4 @@ boto docopt -python-simple-hipchat==0.2 requests diff --git a/requirements3.txt b/requirements3.txt index 78d9ac1c596..3fca4f7088e 100644 --- a/requirements3.txt +++ b/requirements3.txt @@ -1,19 +1,3 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# make upgrade -# -awscli==1.15.19 -boto3==1.7.14 -botocore==1.10.19 # via awscli, boto3, s3transfer -colorama==0.3.7 # via awscli -docutils==0.14 # via awscli, 
botocore -futures==3.2.0 ; python_version == "2.7" -jmespath==0.9.3 # via boto3, botocore -pyasn1==0.4.2 # via rsa -python-dateutil==2.7.3 # via botocore -pyyaml==3.12 # via awscli -rsa==3.4.2 # via awscli -s3transfer==0.1.13 # via awscli, boto3 -six==1.11.0 # via python-dateutil +# File not removed in order to be backwards compatibility +# Use requirements.txt instead +-r requirements.txt diff --git a/test.mk b/test.mk index 70d83992faa..c9bdc85d077 100644 --- a/test.mk +++ b/test.mk @@ -15,21 +15,18 @@ test.help: test: test.syntax test.playbooks -test.syntax: test.syntax.yml test.syntax.json test.syntax.dockerfiles +test.syntax: test.syntax.yml test.syntax.json test.syntax.yml: $(patsubst %,test.syntax.yml/%,$(yml_files)) test.syntax.yml/%: - python -c "import sys,yaml; yaml.load(open(sys.argv[1]))" $* >/dev/null + python -c "import sys,yaml; yaml.safe_load(open(sys.argv[1]))" $* >/dev/null test.syntax.json: $(patsubst %,test.syntax.json/%,$(json_files)) test.syntax.json/%: jsonlint -v $* -test.syntax.dockerfiles: - python util/check_dockerfile_coverage.py "$(images)" - test.playbooks: tests/test_playbooks.sh diff --git a/tests/test_mongodb_replica_set.py b/tests/test_mongodb_replica_set.py index 80a9470fdbd..9bb940531d6 100644 --- a/tests/test_mongodb_replica_set.py +++ b/tests/test_mongodb_replica_set.py @@ -5,9 +5,10 @@ # 2. rename mongodb_replica_set to mongodb_replica_set.py # 3. 
python test_mongodb_replica_set.py +from __future__ import absolute_import import mongodb_replica_set as mrs import unittest, mock -from urllib import quote_plus +from six.moves.urllib.parse import quote_plus from copy import deepcopy class TestNoPatchingMongodbReplicaSet(unittest.TestCase): diff --git a/tests/test_playbooks.sh b/tests/test_playbooks.sh index 8c49dc01523..42eddbde4f9 100755 --- a/tests/test_playbooks.sh +++ b/tests/test_playbooks.sh @@ -16,7 +16,7 @@ ansible-playbook -i localhost, --syntax-check travis-test.yml output_dir="$PWD/test_output/env-dep" mkdir -p $output_dir -ansible-playbook -i localhost, -c local --tags edxapp_cfg edxapp.yml -e edxapp_user=`whoami` -e edxapp_app_dir=$output_dir -e edxapp_code_dir=$output_dir -e EDXAPP_EDXAPP_SECRET_KEY='DUMMY TRAVIS KEY' +ansible-playbook -i localhost, -c local --tags "common_directories,edxapp_cfg" edxapp.yml -e edxapp_user=`whoami` -e edxapp_app_dir=$output_dir -e edxapp_code_dir=$output_dir -e EDXAPP_EDXAPP_SECRET_KEY='DUMMY TRAVIS KEY' root_dir=$output_dir environment_deployments="." diff --git a/util/ansible_msg.py b/util/ansible_msg.py index 57e1f3460c2..9c3aa38642a 100755 --- a/util/ansible_msg.py +++ b/util/ansible_msg.py @@ -1,6 +1,8 @@ -#!/usr/bin/env python3.6 +#!/usr/bin/env python """Simple utility for deciphering Ansible jsonized task output.""" +from __future__ import print_function + import json import sys @@ -13,28 +15,30 @@ f = sys.stdin junk = f.read() +if not junk: + print("No message to decode.") + sys.exit() # junk: # '==> default: failed: [localhost] (item=/edx/app/edx_ansible/edx_ansible/requirements.txt) => {"cmd": "/edx/app/edx...' 
-print("Stdin is {} chars: {!r}...{!r}".format(len(junk), junk[:40], junk[-40:])) - junk = junk.replace('\n', '') junk = junk[junk.index('=> {')+3:] junk = junk[:junk.rindex('}')+1] data = json.loads(junk) -GOOD_KEYS = ['cmd', 'msg', 'stdout', 'stderr', 'module_stdout', 'module_stderr', 'warnings'] -for key in GOOD_KEYS: - if data.get(key): - print(f"== {key} ===========================") - print(data[key]) - -BAD_KEYS = ['stdout_lines', 'start', 'end', 'delta', 'changed', 'failed', 'rc', 'item'] +# Order these so that the most likely useful messages are last. +GOOD_KEYS = ['cmd', 'module_stdout', 'module_stderr', 'warnings', 'msg', 'censored', 'stderr', 'stdout'] +IGNORE_KEYS = ['stdout_lines', 'stderr_lines', 'start', 'end', 'delta', 'changed', 'failed', 'rc', 'item'] -unknown_keys = set(data) - set(GOOD_KEYS) - set(BAD_KEYS) +unknown_keys = set(data) - set(GOOD_KEYS) - set(IGNORE_KEYS) if unknown_keys: print("== Unknown keys ======================") for key in unknown_keys: - print(f"{key}: {data[key]!r:80}") + print("{key}: {val!r:80}".format(key=key, val=data[key])) + +for key in GOOD_KEYS: + if data.get(key): + print("== {key} ===========================".format(key=key)) + print((data[key])) diff --git a/util/asg_event_notifications_util.py b/util/asg_event_notifications_util.py new file mode 100644 index 00000000000..86f0aee74b9 --- /dev/null +++ b/util/asg_event_notifications_util.py @@ -0,0 +1,100 @@ +from __future__ import absolute_import +from __future__ import print_function +import boto3 +import click + +@click.group() +def cli(): + pass + +def get_asg_infos(): + + response = client.describe_auto_scaling_groups(MaxRecords=100) + auto_scaling_groups = response['AutoScalingGroups'] + + return auto_scaling_groups + +def get_asg_names(): + + asg_names = list() + for asg in get_asg_infos(): + asg_names.append(asg['AutoScalingGroupName']) + + return asg_names + +def get_asg_event_notifications(asg): + + event_notifications = list() + response = \ + 
client.describe_notification_configurations(AutoScalingGroupNames=[asg], + MaxRecords=100) + notification_configs = response['NotificationConfigurations'] + for notification in notification_configs: + event_notifications.append(notification['NotificationType']) + + return event_notifications + +@click.command() +def show_asg_event_notifications(): + + try: + + for asg in get_asg_names(): + event_notifications = get_asg_event_notifications(asg) + + if event_notifications: + print(("Event notifications: {0} are set for ASG: {1}".format(event_notifications, + asg))) + else: + print(("No Event Notifications found for ASG {}".format(asg))) + except Exception as e: + + print(e) + +@click.command() +@click.option('--topic_arn', help='The ARN of Amazon SNS topic', + required=True) +@click.option('--event', + help='The type of event that causes the notification to be sent' + , default='autoscaling:EC2_INSTANCE_LAUNCH_ERROR') +@click.option('--confirm', default=False, required=False, is_flag=True, + help='Set this to create event notification for asg') +def create_asg_event_notifications( + topic_arn, + event, + confirm, + ): + + asg_names = get_asg_names() + asg_to_create_event_notifications = list() + + for asg_name in asg_names: + + event_notifications = get_asg_event_notifications(asg_name) + + if event in event_notifications: + continue + else: + asg_to_create_event_notifications.append(asg_name) + + if confirm is False: + print(("Would have created the event notification for asgs {}".format(asg_to_create_event_notifications))) + else: + try: + for asg in asg_to_create_event_notifications: + + response = \ + client.put_notification_configuration(AutoScalingGroupName=asg, + TopicARN=topic_arn, NotificationTypes=[event]) + + print(("Created {0} event notifications for auto scaling group {1}").format(event, + asg)) + except Exception as e: + print(e) + +cli.add_command(show_asg_event_notifications) +cli.add_command(create_asg_event_notifications) +if __name__ == 
'__main__': + + client = boto3.client('autoscaling') + cli() diff --git a/util/aws_ip_locator/example b/util/aws_ip_locator/example new file mode 100644 index 00000000000..71a44c4281a --- /dev/null +++ b/util/aws_ip_locator/example @@ -0,0 +1,53 @@ + + +This programs collects ips from various AWS services such as EC2, RDS, Elasticache etc.. + + +Usage: +. assume_role role 555555 +python ipcollector.py collect_ips --file_name ../../../edx-secure/optiv/ip_locator_inputs/edx.json + +Example file input: + +[{ + "title": "External ip list", + "external_hostnames": [ + "some-site.com", + "courses.edx.org" + ] + }, + { + "title": "Internal ip list", + "ec2_name_tags": [{ + "display_name": "display name 1", + "aws_tag_name": "some aws ec2 instance tag" + }, + { + "display_name": "display name 2", + "aws_tag_name": "some other tag" + } + ], + "ec2_elb_name_tags": [{ + "display_name": "some-elb", + "elb_name": "some-elb-name" + }, + { + "display_name": "(Expected unreachable) my-other-elb", + "elb_name": "some-other-elb" + } + ], + + "elasticache_clusters": [{ + "display_name": "redis", + "cluster_id": "some redis instance id" + }], + "rds_instances": [{ + "display_name": "some interesting RDS", + "instance_id": "actual internal instance name for interesting rds" + }], + "static_entries": [{ + "display_name": "Static report entry", + "display_value": "Static report value" + }] + } +] diff --git a/util/aws_ip_locator/ipcollector.py b/util/aws_ip_locator/ipcollector.py new file mode 100644 index 00000000000..31d1f082e8c --- /dev/null +++ b/util/aws_ip_locator/ipcollector.py @@ -0,0 +1,160 @@ +from __future__ import absolute_import +from __future__ import print_function +import boto3 +import click +import socket +import json + +@click.group() +def cli(): + pass + +@click.command() +@click.option('--file_name', + required=True, + help=""" + file containing tags name etc that you would like to find ips for, see examples for an example of this input""") +def 
collect_ips(file_name): + output_json = json.load(open(file_name)) + + for entry in output_json: + print_header(entry['title']) + + external_hostnames_key = 'external_hostnames' + if external_hostnames_key in entry: + external_hostnames = entry[external_hostnames_key] + for hostname in external_hostnames: + print_line_item(hostname, get_ip_for_hostname(hostname)) + + ec2_instance_name_tags_key = 'ec2_instance_name_tags' + if ec2_instance_name_tags_key in entry: + ec2_name_tags = entry[ec2_instance_name_tags_key] + for pair in ec2_name_tags: + display_name = pair['display_name'] + aws_tag_name = pair['aws_tag_name'] + ip = get_instance_ip_by_name_tag(aws_tag_name) + print_line_item(display_name, ip) + + ec2_elb_name_tags_key = 'ec2_elb_name_tags' + if ec2_elb_name_tags_key in entry: + ec2_elb_name_tags = entry[ec2_elb_name_tags_key] + for pair in ec2_elb_name_tags: + display_name = pair['display_name'] + elb_name = pair['elb_name'] + ip = get_elb_ip_by_elb_name(elb_name) + print_line_item(display_name, ip) + + elasticache_clusters_key = 'elasticache_clusters' + if elasticache_clusters_key in entry: + elasticache_clusters = entry[elasticache_clusters_key] + for cluster in elasticache_clusters: + display_name = cluster['display_name'] + cluster_id = cluster['cluster_id'] + print_line_item(display_name, get_elasticache_ip_by_cluster_id(cluster_id)) + + rds_instances_key = 'rds_instances' + if rds_instances_key in entry: + rds_instances = entry[rds_instances_key] + for instance in rds_instances: + display_name = instance['display_name'] + instance_id = None + if 'instance_id' in instance: + instance_id = instance['instance_id'] + print_line_item(display_name, get_rds_ip_by_instance_id(instance_id)) + elif 'cluster_id' in instance: + cluster_id = instance['cluster_id'] + instance_id = get_writer_instance_id_by_cluster_id(cluster_id) + print_line_item(display_name, get_rds_ip_by_instance_id(instance_id)) + else: + raise ValueError('Cant locate RDS instance without 
instance_id or cluster_id') + + static_entries_key = 'static_entries' + if static_entries_key in entry: + static_entries = entry[static_entries_key] + for item in static_entries: + display_name = item['display_name'] + display_value = item['display_value'] + print_line_item(display_name, display_value) + + +cli.add_command(collect_ips) + +def get_ip_for_hostname(hostname): + return socket.gethostbyname(hostname) + +def print_header(name): + header =""" +============================ +{0} +============================""" + print((header.format(name))) + +def print_line_item(target, ip): + line = "[ * ] {0} - {1}" + print((line.format(target, ip))) + +def get_instance_ip_by_name_tag(value): + client = boto3.client('ec2') + filters = [{ + 'Name': 'tag:Name', + 'Values': [value] + }] + + response = client.describe_instances(Filters=filters) + + for r in response['Reservations']: + for i in r['Instances']: + if(i['State']['Name'] == 'running'): + ip = i['PrivateIpAddress'] + return ip + +def get_elb_ip_by_elb_name(elb_name): + client = boto3.client('elb') + response = client.describe_load_balancers( + LoadBalancerNames=[ + elb_name, + ] + ) + hostname = response['LoadBalancerDescriptions'][0]['DNSName'] + return get_ip_for_hostname(hostname) + +def get_elasticache_ip_by_cluster_id(cluster_id): + client = boto3.client('elasticache') + response = client.describe_cache_clusters( + CacheClusterId=cluster_id, + ShowCacheNodeInfo=True, + ) + hostname = response['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] + return get_ip_for_hostname(hostname) + + +def get_elasticache_ip_by_cluster_id(cluster_id): + client = boto3.client('elasticache') + response = client.describe_cache_clusters( + CacheClusterId=cluster_id, + ShowCacheNodeInfo=True, + ) + hostname = response['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] + return get_ip_for_hostname(hostname) + +def get_writer_instance_id_by_cluster_id(cluster_id): + client = boto3.client('rds') + response = 
client.describe_db_clusters( + DBClusterIdentifier=cluster_id + ) + members = response['DBClusters'][0]['DBClusterMembers'] + for member in members: + if member['IsClusterWriter']: + return member['DBInstanceIdentifier'] + raise ValueError('Could not locate RDS instance with given instance_id or cluster_id') + +def get_rds_ip_by_instance_id(instance_id): + client = boto3.client('rds') + response = client.describe_db_instances( + DBInstanceIdentifier=instance_id, + ) + hostname = response['DBInstances'][0]['Endpoint']['Address'] + return get_ip_for_hostname(hostname) + +if __name__ == '__main__': + cli() diff --git a/util/aws_ip_locator/requirements.txt b/util/aws_ip_locator/requirements.txt new file mode 100644 index 00000000000..c98674c95f8 --- /dev/null +++ b/util/aws_ip_locator/requirements.txt @@ -0,0 +1,11 @@ +boto3==1.9.0 +botocore==1.12.0 +click==6.7 +docutils==0.14 +futures==3.2.0 +jmespath==0.9.3 +netaddr==0.7.19 +python-dateutil==2.7.3 +s3transfer==0.1.13 +six==1.11.0 +urllib3==1.24.2 diff --git a/util/bake_config.sh b/util/bake_config.sh new file mode 100755 index 00000000000..980ba197d4a --- /dev/null +++ b/util/bake_config.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# For instructions on how to use this script see https://openedx.atlassian.net/wiki/spaces/EdxOps/pages/390627556/How+to+run+baked+config+on+your+laptop + +# Exit on fail +set -e + +# Enforce required envs +: ${WORKSPACE?"Need to set WORKSPACE"} +: ${CONFIG_RENDERING_TARGET?"Need to set CONFIG_RENDERING_TARGET"} + +# Optional envs you can override if you wish to render config for different EDPs +# these are expected to be comma separated with no spaces, see defaults. 
+ENVIRONMENT_DEPLOYMENTS=${ENVIRONMENT_DEPLOYMENTS:=stage-edx,prod-edx,prod-edge,developer-sandbox} +PLAYS=${PLAYS:=edxapp,veda_web_frontend,analyticsapi,credentials,ecommerce,discovery,ecomworker,insights,notifier,registrar,notes} + +rm -rf $CONFIG_RENDERING_TARGET +cd $WORKSPACE/configuration/playbooks + +for ENVIRONMENT_DEPLOYMENT in $(echo $ENVIRONMENT_DEPLOYMENTS | sed "s/,/ /g") +do + ENVIRONMENT="$(echo $ENVIRONMENT_DEPLOYMENT | cut -d - -f 1 )" + DEPLOY="$(echo $ENVIRONMENT_DEPLOYMENT | cut -d - -f 2 )" + VARS="-e@$WORKSPACE/${DEPLOY}-internal/ansible/vars/${DEPLOY}.yml -e@$WORKSPACE/${DEPLOY}-internal/ansible/vars/${ENVIRONMENT_DEPLOYMENT}.yml -e@$WORKSPACE/${DEPLOY}-secure/ansible/vars/${DEPLOY}.yml -e@$WORKSPACE/${DEPLOY}-secure/ansible/vars/${ENVIRONMENT_DEPLOYMENT}.yml" + + if [ "${ENVIRONMENT_DEPLOYMENT}" == "developer-sandbox" ]; then + VARS="-e@$WORKSPACE/${DEPLOY}-internal/ansible/vars/${ENVIRONMENT_DEPLOYMENT}.yml -e@$WORKSPACE/${DEPLOY}-secure/ansible/vars/${ENVIRONMENT_DEPLOYMENT}.yml -e ansible_ec2_public_ipv4=LINTING" + fi + + mkdir -p $CONFIG_RENDERING_TARGET/$ENVIRONMENT_DEPLOYMENT + + # PLAYS for Environment/Deployment + for PLAY in $(echo $PLAYS | sed "s/,/ /g") + do + if [ "$PLAY" == "edxapp" ]; then + # LMS / CMS for Environment/Deployment + ansible-playbook --become-user=$(whoami) -vvv -c local -i 'localhost,' --tags edxapp_cfg_yaml_only ./edxapp.yml $VARS -e edxapp_user=$(whoami) -e common_web_group=$(whoami) -e COMMON_CFG_DIR=$CONFIG_RENDERING_TARGET/$ENVIRONMENT_DEPLOYMENT + else + # All other IDAs + ansible-playbook --become-user=$(whoami) -vvv -c local -i 'localhost,' --tags install:app-configuration ./$PLAY.yml $VARS -e COMMON_CFG_DIR=$CONFIG_RENDERING_TARGET/$ENVIRONMENT_DEPLOYMENT + fi + done +done diff --git a/util/baked_config.sh b/util/baked_config.sh deleted file mode 100755 index f6cbdc2cb87..00000000000 --- a/util/baked_config.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# For instructions on how to use this script 
see https://openedx.atlassian.net/wiki/spaces/EdxOps/pages/390627556/How+to+run+baked+config+on+your+laptop - -pushd ../.. > /dev/null -WORKSPACE="$(pwd)" -popd > /dev/null - -ENVIRONMENT="$(echo $1 | cut -d - -f 1 )" -DEPLOY="$(echo $1 | cut -d - -f 2 )" -E_D="${ENVIRONMENT}-${DEPLOY}" - -VARS="-e@$WORKSPACE/${DEPLOY}-internal/ansible/vars/${DEPLOY}.yml -e@$WORKSPACE/${DEPLOY}-internal/ansible/vars/${E_D}.yml -e@$WORKSPACE/${DEPLOY}-secure/ansible/vars/${DEPLOY}.yml -e@$WORKSPACE/${DEPLOY}-secure/ansible/vars/${E_D}.yml" - -if [ "${E_D}" == "developer-sandbox" ]; then - VARS="-e@$WORKSPACE/${DEPLOY}-internal/ansible/vars/${E_D}.yml -e@$WORKSPACE/${DEPLOY}-secure/ansible/vars/${E_D}.yml" -fi - -if [ ! -e "$WORKSPACE/${DEPLOY}-internal/ansible/vars/${E_D}.yml" ]; then - echo "Please specify a valid environment-deployment (i.e. stage-edx) as the first and only argument" - exit 1 -fi - -mkdir -p $WORKSPACE/baked-config-secure/${E_D} - -cd ../playbooks/ -ansible-playbook -vvv -c local -i 'localhost,' --tags edxapp_cfg ./edxapp.yml ${VARS} -e edxapp_user=$(whoami) -e common_web_group=$(whoami) -e edxapp_app_dir=$WORKSPACE/baked-config-secure/${E_D} -e edxapp_code_dir=$WORKSPACE/edx-platform -s --ask-sudo-pass --diff diff --git a/util/balancecontainers.py b/util/balancecontainers.py deleted file mode 100644 index 86a376ec726..00000000000 --- a/util/balancecontainers.py +++ /dev/null @@ -1,105 +0,0 @@ -import argparse -import logging -import os -import sys - -try: - # This script is used by docker.mk at parse-time, which means when you run - # "make requirements" to install the required Python packages, this script - # runs before its requirements are installed. That means this import will - # fail. To prevent a successful installation from having irrelevant error - # messages, we catch the failure and exit silently. 
- import pathlib2 -except ImportError: - sys.exit(1) - -import docker_images - - -TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR", "") -CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml") -LOGGER = logging.getLogger(__name__) - -def pack_shards(used_images, num_shards): - """ - Determines an approximation of the optimal way to pack the images into a given number of shards so as to - equalize the execution time amongst the shards. - - Input: - used_images: A set of Docker images and their ranks - num_shards: A number of shards amongst which to distribute the Docker images - """ - - # sorts used containers in descending order on the weight - sorted_images = sorted(used_images, key = lambda x: x[1], reverse=True) - - shards = [] - - # for the number of shards - for i in range(0, num_shards): - # initialize initial dict - shards.append({"images": [], "sum": 0}) - - # for each container - for image in sorted_images: - # find the shard with the current minimum execution time - shard = min(shards, key = lambda x: x["sum"]) - - # add the current container to the shard - shard["images"].append(image) - - # add the current container's weight to the shard's total expected execution time - shard["sum"] += image[1] - - return shards - -def read_input(): - """ - Reads input from standard input. 
- """ - - images = [] - - # get images from standard in - for line in sys.stdin: - line = line.strip() - line = line.strip("[]") - - items = line.split() - images.extend(items) - - return images - -def arg_parse(): - - parser = argparse.ArgumentParser(description = 'Given a list of containers as input and a number of shards, ' - 'finds an approximation of the optimal distribution of the containers over the shards, provided a set of hard-coded weights ' - 'in parsefiles_config.yml.') - parser.add_argument('num_shards', type = int, help = "the number of shards amongst which to distribute Docker builds") - - return parser.parse_args() - -if __name__ == '__main__': - - args = arg_parse() - - # configure logging - logging.basicConfig() - - # get input from standard in - images = read_input() - - # get images that are used and described in configuration file - used_images = docker_images.get_used_images(images) - - # find optimal packing of the images amongst shards - shards = pack_shards(used_images, args.num_shards) - - # print space separated list of containers for each shard - for shard in shards: - middle = " " - - conts = [x[0] for x in shard["images"]] - - line = middle.join(conts) - print line diff --git a/util/check_dockerfile_coverage.py b/util/check_dockerfile_coverage.py deleted file mode 100644 index b6267c0db70..00000000000 --- a/util/check_dockerfile_coverage.py +++ /dev/null @@ -1,54 +0,0 @@ -import yaml -import os -import pathlib2 -import itertools -import argparse -import logging -import sys -import docker_images - -TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR", ".") -CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml") -LOGGER = logging.getLogger(__name__) - -def check_coverage(images, used_images): - """ - Checks whether all images are described in parsefiles_config.yml and raises an error otherwise, directing toward documentation to resolving the error. 
- - Input: - images: the set of images scheduled to be built - used_images: the subset of images with their ranks that are in the parsefiles_config.yml file - """ - - # determine which Dockerfiles are not covered; i.e. the set difference of the Dockerfiles to build minus the Dockerfile - # available to be built is non-empty - uncovered = set(images) - set([x[0] for x in used_images]) - - # exit with error code if uncovered Dockerfiles exist - if uncovered: - LOGGER.error("The following Dockerfiles are not described in the parsefiles_config.yml file: {}. Please see the following documentation on how to add Dockerfile ranks to the configuration file: {}".format(uncovered, "https://github.com/edx/configuration/blob/master/util/README.rst")) - sys.exit(1) - -def arg_parse(): - - parser = argparse.ArgumentParser(description = 'Given a list of images as input checks that each input image is described correctly in parsefiles_config.yml') - parser.add_argument('images', help = "the Dockerfiles that need to be built as the result of some commit change and whose coverage is checked") - return parser.parse_args() - -if __name__ == '__main__': - - args = arg_parse() - - # configure logging - logging.basicConfig() - - # read input - images = [] - - for i in args.images.split(): - images.append(i) - - # get images that are used and described in configuration file - used_images = docker_images.get_used_images(images) - - check_coverage(images, used_images) diff --git a/util/check_for_key_collisions/README.md b/util/check_for_key_collisions/README.md new file mode 100644 index 00000000000..5a7be77a239 --- /dev/null +++ b/util/check_for_key_collisions/README.md @@ -0,0 +1,4 @@ +Finds if there are colliding keys in a set of yaml/json files that might collide when ansible merges happen + +USAGE: +python check_for_yaml_key_collisions/check_for_yaml_key_collisions.py --files file1.yml --files file2.json \ No newline at end of file diff --git 
a/util/check_for_key_collisions/check_for_key_collisions.py b/util/check_for_key_collisions/check_for_key_collisions.py new file mode 100644 index 00000000000..b06d6705351 --- /dev/null +++ b/util/check_for_key_collisions/check_for_key_collisions.py @@ -0,0 +1,41 @@ +from __future__ import absolute_import +from __future__ import print_function +import click +import yaml +import json +from collections import defaultdict +import six + +@click.command() +@click.option('--files', '-m', multiple=True) +def check_for_yaml_key_collisions(files): + values_for_keys = defaultdict(lambda: []) + for file_path in files: + content = None + if file_path.endswith(".yml") or file_path.endswith(".yaml"): + stream = open(file_path, 'r') + content = yaml.safe_load(stream) + elif file_path.endswith(".json"): + with open(file_path, "r") as read_file: + content = json.load(read_file) + for key, value in six.iteritems(content): + values_for_keys[key].append(value) + + collisions = {} + + for key,value in six.iteritems(values_for_keys): + if len(value) > 1: + collisions[key] = value + + + if len(list(collisions.keys())) > 0: + print((str.format("Found key collisions: {}", len(collisions)))) + for key,value in six.iteritems(collisions): + print((str.format("{} {}", key, value))) + exit(1) + else: + print("No collisions found") + exit(0) + +if __name__ == '__main__': + check_for_yaml_key_collisions() diff --git a/util/check_for_key_collisions/requirements.txt b/util/check_for_key_collisions/requirements.txt new file mode 100644 index 00000000000..edb74a8e7be --- /dev/null +++ b/util/check_for_key_collisions/requirements.txt @@ -0,0 +1,2 @@ +Click==7.0 +PyYAML==5.1 diff --git a/util/check_rds_slow_query_logs/check_rds_slow_query_logs.py b/util/check_rds_slow_query_logs/check_rds_slow_query_logs.py new file mode 100644 index 00000000000..15aa63e4c28 --- /dev/null +++ b/util/check_rds_slow_query_logs/check_rds_slow_query_logs.py @@ -0,0 +1,105 @@ +from __future__ import absolute_import +from 
__future__ import print_function +import boto3 +import click + + +def get_db_instances(db_engine): + + """ + Returns: + List of provisioned RDS instances + """ + + if db_engine == "mysql": + instances = rds.describe_db_instances()['DBInstances'] + elif db_engine == "aurora": + instances = rds.describe_db_clusters()['DBClusters'] + return instances + + +def get_db_parameters(db_engine, db_parameter_group, marker): + + """ + Returns: + The detailed parameter list for a particular DB parameter + group Using marker as pagination token as at max it returns + 100 records + """ + + if db_engine == "mysql": + response = rds.describe_db_parameters( + DBParameterGroupName=db_parameter_group, + Marker=marker) + elif db_engine == "aurora": + response = rds.describe_db_cluster_parameters( + DBClusterParameterGroupName=db_parameter_group, + Marker=marker) + return response + + +def check_slow_query_logs(db_engine, db_parameter_group): + + slow_log_enabled = False + + marker = "" + + while True: + + if marker is None: + break + + response = get_db_parameters(db_engine, db_parameter_group, marker) + marker = response.get('Marker') + parameters = response.get('Parameters') + + for param in parameters: + + if 'slow_query_log' in param['ParameterName']: + + if 'ParameterValue' in param and param['ParameterValue'] == '1': + slow_log_enabled = True + break + + return slow_log_enabled + + +@click.command() +@click.option('--db_engine', help='RDS engine: mysql or aurora', required=True) +@click.option('--whitelist', type=(str), multiple=True, help='Whitelisted RDS Instances') +def cli(db_engine, whitelist): + + ignore_rds = list(whitelist) + slow_query_logs_disabled_rds = [] + exit_status = 0 + + dbhosts = get_db_instances(db_engine) + + for dbhost in dbhosts: + + if db_engine == "mysql": + db_identifier = dbhost['DBInstanceIdentifier'] + if db_identifier in ignore_rds: + continue + + db_parameter_group = dbhost['DBParameterGroups'][0]['DBParameterGroupName'] + elif db_engine == 
"aurora": + db_identifier = dbhost['DBClusterIdentifier'] + if db_identifier in ignore_rds: + continue + + db_parameter_group = dbhost['DBClusterParameterGroup'] + + slow_query_logs_enabled = check_slow_query_logs(db_engine, db_parameter_group) + + if not slow_query_logs_enabled: + exit_status = 1 + slow_query_logs_disabled_rds.append(db_identifier) + + print(("Slow query logs are disabled for RDS Instances\n{0}".format(slow_query_logs_disabled_rds))) + exit(exit_status) + +if __name__ == '__main__': + + rds = boto3.client('rds') + cli() diff --git a/util/check_rds_slow_query_logs/requirements.txt b/util/check_rds_slow_query_logs/requirements.txt new file mode 100644 index 00000000000..9a09b17b262 --- /dev/null +++ b/util/check_rds_slow_query_logs/requirements.txt @@ -0,0 +1,9 @@ +boto3==1.9.96 +botocore==1.12.214 +Click==7.0 +docutils==0.15.2 +jmespath==0.9.4 +python-dateutil==2.8.0 +s3transfer==0.2.1 +six==1.12.0 +urllib3==1.25.3 diff --git a/util/cloudflare/by_origin_purger/README.md b/util/cloudflare/by_origin_purger/README.md new file mode 100644 index 00000000000..e001dd35204 --- /dev/null +++ b/util/cloudflare/by_origin_purger/README.md @@ -0,0 +1,34 @@ + +Cloudflare cache keys include the origin, so in order to purge assets with cached CORS headers you need to +purge cloudflare cache assets by origin + + + +build target list like so: +aws s3 ls s3://bucket-url/path --recursive | awk '{print $4}' > targets + +Make sure this seems reasonable... +cat targets +cat targets | wc -l + + python purger.py --origin https://example.edu --cloudflare_site_url https://cloudflare-example.net --target_path targets + Will purge: https://cloudflare-example.net/headerCCE-V230100/headerCCE-V230100.m3u8 at origin https://example.edu and 500 others like it. Add --confirm to execute. + Will purge: https://cloudflare-example.net/headerABC-V230400/headerABC-V230400_3_49.ts at origin https://example.edu and 500 others like it. Add --confirm to execute. 
+ Will purge: https://cloudflare-example.net/headerABC-V230600/headerABC-V230600_5_13.ts at origin https://example.edu and 500 others like it. Add --confirm to execute. + Will purge: https://cloudflare-example.net/headerABC-V230700/headerABC-V230700_6_46.ts at origin https://example.edu and 500 others like it. Add --confirm to execute. + Will purge: https://cloudflare-example.net/headerABC-V231100/headerABC-V231100_1_5.ts at origin https://example.edu and 500 others like it. Add --confirm to execute. + Will purge: https://cloudflare-example.net/headerABC-V231200/headerABC-V231200_6_1.ts at origin https://example.edu and 500 others like it. Add --confirm to execute. + Will purge: https://cloudflare-example.net/headerABC-V231700/headerABC-V231700_2_11.ts at origin https://example.edu and 500 others like it. Add --confirm to execute. + Will purge: https://cloudflare-example.net/headerABC-V231900/headerABC-V231900_6_12.ts at origin https://example.edu and 500 others like it. Add --confirm to execute. + Will purge: https://cloudflare-example.net/headerABC-V232000/headerABC-V232000_6_28.ts at origin https://example.edu and 51 others like it. Add --confirm to execute. 
+ + python purger.py --origin https://example.edu --cloudflare_site_url https://cloudflare-example.net --target_path targets + {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []} + {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []} + {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []} + {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []} + {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []} + {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []} + {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []} + {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []} + {'result': {'id': 'BOSYunXGVf3uMevCy4J0Tk7AuuU849'}, 'success': True, 'errors': [], 'messages': []} diff --git a/util/cloudflare/by_origin_purger/purger.py b/util/cloudflare/by_origin_purger/purger.py new file mode 100644 index 00000000000..916aedf4297 --- /dev/null +++ b/util/cloudflare/by_origin_purger/purger.py @@ -0,0 +1,56 @@ +from __future__ import absolute_import +from __future__ import print_function +import requests +import click +from six.moves import range + + + +def wrap(cloudflare_site_url, s3_asset_path, origin): + url = str.format("{}/{}", cloudflare_site_url, s3_asset_path) + return { + "url": url, + "headers": { + "Origin": origin + } + } + +def divide_chunks(list_to_divide, number_in_chunk): + for index in range(0, len(list_to_divide), number_in_chunk): + yield list_to_divide[index:index + number_in_chunk] + +@click.command() +@click.option('--cloudflare_email', required=True, envvar='CLOUDFLARE_EMAIL') +@click.option('--cloudflare_api_key', required=True, envvar='CLOUDFLARE_API_KEY') +@click.option('--cloudflare_zone_id', 
required=True, envvar='CLOUDFLARE_ZONE_ID', help='Get this from the zones API endpoint') +@click.option('--origin', required=True) +@click.option('--cloudflare_site_url') +@click.option('--target_path', required=True) +@click.option('--confirm', is_flag=True) +def purge(cloudflare_email, cloudflare_api_key, cloudflare_zone_id, origin, cloudflare_site_url, target_path, confirm): + with open(target_path) as f: + lines = f.readlines() + + lines = [x.strip() for x in lines] + for index, s3_asset_path in enumerate(lines): + lines[index] = wrap(cloudflare_site_url, s3_asset_path, origin) + + chunk_size = 500 + chunks = divide_chunks(lines, chunk_size) + for chunk in chunks: + if not confirm: + print((str.format("Will purge: {} at origin {} and {} others like it. Add --confirm to execute.", chunk[0]['url'], chunk[0]['headers']['Origin'], len(chunk)))) + else: + headers = {'X-Auth-Email': cloudflare_email, + 'X-Auth-Key': cloudflare_api_key, + 'Content-Type': 'application/json'} + payload = { + "files": chunk + } + url = str.format("https://api.cloudflare.com/client/v4/zones/{cloudflare_zone_id}/purge_cache", cloudflare_zone_id=cloudflare_zone_id) + response = requests.delete(url, headers=headers, json=payload) + print((response.json())) + +if __name__ == '__main__': + purge() + diff --git a/util/cloudflare/by_origin_purger/requirements.txt b/util/cloudflare/by_origin_purger/requirements.txt new file mode 100644 index 00000000000..9b31d71a232 --- /dev/null +++ b/util/cloudflare/by_origin_purger/requirements.txt @@ -0,0 +1,6 @@ +certifi==2018.10.15 +chardet==3.0.4 +Click==7.0 +idna==2.7 +requests==2.20.0 +urllib3==1.24.2 diff --git a/util/cluster_instance_monitoring.py b/util/cluster_instance_monitoring.py index 970ec9636ee..7eb43bdf305 100644 --- a/util/cluster_instance_monitoring.py +++ b/util/cluster_instance_monitoring.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import +from __future__ import print_function import boto3 import argparse import sys @@ -73,9 +75,9 
@@ def find_active_instances(cluster_file, region): if no_active_instances_triples: print("Fail. There are no active instances for the following cluster(s)") for triple in no_active_instances_triples: - print('environment: ' + triple.split('-')[0]) - print('deployment: ' + triple.split('-')[1]) - print('cluster: ' + triple.split('-')[2]) + print(('environment: ' + triple.split('-')[0])) + print(('deployment: ' + triple.split('-')[1])) + print(('cluster: ' + triple.split('-')[2])) print('----') sys.exit(1) diff --git a/util/config/merge_json_to_yaml b/util/config/merge_json_to_yaml new file mode 100755 index 00000000000..f0bed725617 --- /dev/null +++ b/util/config/merge_json_to_yaml @@ -0,0 +1,20 @@ +#! /usr/bin/env python + +import json +import click +import yaml +import sys + +@click.command() +@click.argument('files', nargs=-1, type=click.Path()) +def join_json(files): + """ This script merges multiple JSON documents into a single namespace, then dumps as YAML """ + data = dict() + for file in files: + click.echo('# ingested %s' % file) + with open(file) as filehandle: + data.update(json.load(filehandle)) + print yaml.safe_dump(data) + +if __name__ == '__main__': + join_json() diff --git a/util/docker_images.py b/util/docker_images.py deleted file mode 100644 index 0a54f0c0952..00000000000 --- a/util/docker_images.py +++ /dev/null @@ -1,36 +0,0 @@ -import yaml -import os -import pathlib2 -import itertools -import sys - -TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR", "") -CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml") - -def get_used_images(images): - """ - Returns the images and their ranks that are scheduled to be built and that exist in the configuration file. 
- - Input: - images: A set of Docker images - """ - - # open config file containing container weights - config_file_path = pathlib2.Path(CONFIG_FILE_PATH) - - with (config_file_path.open(mode='r')) as file: - try: - config = yaml.load(file) - except yaml.YAMLError, exc: - LOGGER.error("error in configuration file: %s" % str(exc)) - sys.exit(1) - - # get container weights - weights = config.get("weights") - - # convert all images in config file to a list of tuples (, ) - weights_list = [x.items() for x in weights] - weights_list = list(itertools.chain.from_iterable(weights_list)) - - # performs intersection between weighted images and input images - return [x for x in weights_list if x[0] in images] diff --git a/util/elasticsearch/requirements.txt b/util/elasticsearch/requirements.txt index 536dd3499dc..715c9da254e 100644 --- a/util/elasticsearch/requirements.txt +++ b/util/elasticsearch/requirements.txt @@ -4,7 +4,9 @@ # # make upgrade # -deepdiff==3.1.0 -elasticsearch==0.4.5 -jsonpickle==0.9.6 # via deepdiff -urllib3==1.22 # via elasticsearch +deepdiff==3.1.0 # via -r requirements/elasticsearch.in +elasticsearch==0.4.5 # via -r requirements/elasticsearch.in +importlib-metadata==1.6.0 # via jsonpickle +jsonpickle==1.4.1 # via deepdiff +urllib3==1.25.9 # via elasticsearch +zipp==3.1.0 # via importlib-metadata diff --git a/util/elasticsearch/verify-index-copy.py b/util/elasticsearch/verify-index-copy.py index 4e6ec8432cb..5f1cfecc1ca 100755 --- a/util/elasticsearch/verify-index-copy.py +++ b/util/elasticsearch/verify-index-copy.py @@ -3,6 +3,8 @@ Verifies that an index was correctly copied from one ES host to another. 
""" +from __future__ import absolute_import +from __future__ import print_function import itertools import pprint import random @@ -56,7 +58,7 @@ def grouper(iterable, n): """ # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx" args = [iter(iterable)] * n - return itertools.izip_longest(*args) + return itertools.zip_longest(*args) def docs_match(old_doc, new_doc): @@ -122,8 +124,8 @@ def docs_match(old_doc, new_doc): #if this fails something is horribly wrong if set(diff_doc.keys()) != set(diff_types): - print 'ERROR: expected to be diffing dictionaries, got something else! id: {}'.format( - new_doc['_id']) + print('ERROR: expected to be diffing dictionaries, got something else! id: {}'.format( + new_doc['_id'])) for diff_type in diff_types: for field in ignorable_fields: @@ -163,13 +165,13 @@ def find_matching_ids(es, index, ids, docs): if docs_match(docs[elt['_id']], elt): matching += 1 else: - print 'FAILURE: Documents with id {id} do not match: '.format( + print('FAILURE: Documents with id {id} do not match: '.format( id=elt['_id'] - ) + repr({'diff': DeepDiff(docs[elt['_id']], elt), 'new': elt, 'old': docs[elt['_id']]}) + ) + repr({'diff': DeepDiff(docs[elt['_id']], elt), 'new': elt, 'old': docs[elt['_id']]})) else: - print 'ERROR: Document with id {id} missing: {doc}'.format( + print('ERROR: Document with id {id} missing: {doc}'.format( id=elt['_id'], doc=docs[elt['_id']] - ) + )) return matching @@ -198,12 +200,12 @@ def scan_documents(old_es, new_es, old_index, new_index): matching += find_matching_ids(new_es, new_index, old_elt_ids, old_elt_docs) total += len(old_elt_ids) if total % 100 == 0: - print 'processed {} items'.format(total) + print('processed {} items'.format(total)) ratio = float(matching)/total - print "{}: scanned documents matching ({} out of {}, {:.6}%)".format( + print("{}: scanned documents matching ({} out of {}, {:.6}%)".format( 'OK' if ratio > SCAN_MATCH_THRESHOLD else 'FAILURE', matching, total, ratio * 100 - ) + )) def 
random_checks(old_es, new_es, old_index, new_index, total_document_count, check_percentage): @@ -249,12 +251,12 @@ def random_checks(old_es, new_es, old_index, new_index, total_document_count, ch current_offset += num_elts if total % 100 == 0: - print 'processed {} items'.format(total) + print('processed {} items'.format(total)) ratio = float(matching) / total - print "{}: random documents matching ({} out of {}, {}%)".format( + print("{}: random documents matching ({} out of {}, {}%)".format( 'OK' if ratio > SCAN_MATCH_THRESHOLD else 'FAILURE', matching, total, int(ratio * 100) - ) + )) def check_mappings(old_mapping, new_mapping): @@ -267,10 +269,10 @@ def check_mappings(old_mapping, new_mapping): deep_diff = DeepDiff(old_mapping, new_mapping) if deep_diff != {}: - print "FAILURE: Index mappings do not match" + print("FAILURE: Index mappings do not match") pprint.pprint(deep_diff) else: - print "OK: Index mappings match" + print("OK: Index mappings match") def main(): @@ -284,26 +286,26 @@ def main(): old_index = args.old[1] new_index = args.new[1] - old_stats = old_es.indices.stats(index=old_index)['indices'].values()[0]['primaries'] - new_stats = new_es.indices.stats(index=new_index)['indices'].values()[0]['primaries'] + old_stats = list(old_es.indices.stats(index=old_index)['indices'].values())[0]['primaries'] + new_stats = list(new_es.indices.stats(index=new_index)['indices'].values())[0]['primaries'] #compare document count old_count = old_stats['docs']['count'] new_count = new_stats['docs']['count'] - print "{}: Document count ({} = {})".format( + print("{}: Document count ({} = {})".format( 'OK' if old_count == new_count else 'FAILURE', old_count, new_count - ) + )) old_size = old_stats['store']['size_in_bytes'] new_size = new_stats['store']['size_in_bytes'] - print "{}: Index size ({} = {})".format( + print("{}: Index size ({} = {})".format( 'OK' if old_count == new_count else 'FAILURE', old_size, new_size - ) + )) def get_mappings(es, index): # for 
1.5.x, there is an extra 'mappings' field that holds the mappings. - mappings = es.indices.get_mapping(index=index).values()[0] + mappings = list(es.indices.get_mapping(index=index).values())[0] new_style = mappings.get('mappings', None) return new_style if new_style is not None else mappings diff --git a/util/elb_tls_policy_management_util/elb_tls_policy_management_util.py b/util/elb_tls_policy_management_util/elb_tls_policy_management_util.py new file mode 100644 index 00000000000..1c6b7d105f6 --- /dev/null +++ b/util/elb_tls_policy_management_util/elb_tls_policy_management_util.py @@ -0,0 +1,278 @@ +from __future__ import absolute_import +from __future__ import print_function +import boto3 +import click +import datetime + +elb_client = None + + +@click.group() +def cli(): + pass + + +def get_client(): + global elb_client + if elb_client is None: + elb_client = boto3.client('elb') + return elb_client + + +def get_policies(): + client = get_client() + response = client.describe_load_balancer_policies() + policy_infos = response['PolicyDescriptions'] + return policy_infos + + +def get_tls_security_policy_template_names(): + policy_infos = get_policies() + policy_template_names = list() + + for policy_info in policy_infos: + if policy_info['PolicyTypeName'] == 'SSLNegotiationPolicyType': + policy_template_names.append(policy_info['PolicyName']) + + return policy_template_names + + +def check_valid_policy(ctx, param, value): + list_of_valid_policy_names = get_tls_security_policy_template_names() + if value not in list_of_valid_policy_names: + raise click.BadParameter("""Could not find the specified policy version, + found versions: {0}""" + .format(list_of_valid_policy_names)) + return value + + +def get_elb_infos(): + client = get_client() + client.describe_load_balancers() + response = client.describe_load_balancers( + PageSize=400 + ) + return response['LoadBalancerDescriptions'] + + +def get_elb_names(): + elb_names = list() + for elb_info in get_elb_infos(): + 
elb_names.append(elb_info['LoadBalancerName']) + return elb_names + + +def print_header(header): + print("\n\n----------------------------------------------") + print(("[ ] {0}".format(header))) + print("----------------------------------------------") + + +def print_line_item(line_item): + print(("[ * ] {0}".format(line_item))) + + +def print_list(name, items_list): + print_header(name) + for item in items_list: + print_line_item(item) + + +def create_tls_policy(elb_name, policy_version_to_copy): + client = get_client() + policy_attributes = list() + # AWS will copy all the other attributes. + policy_attributes.append({ + "AttributeName": "Reference-Security-Policy", + "AttributeValue": policy_version_to_copy + }) + milli_datetime = str(int(datetime.datetime.now().strftime("%s")) * 1000) + print('Creating new policy for elb....') + new_policy_name = "SSLUpdateScript-SSLNegotiationPolicy-{0}-{1}".format( + elb_name, milli_datetime) + response = client.create_load_balancer_policy( + LoadBalancerName=elb_name, + PolicyName=new_policy_name, + PolicyTypeName='SSLNegotiationPolicyType', + PolicyAttributes=policy_attributes + ) + print('Done creating ...') + return new_policy_name + + +def elb_ref_policy(elb_name, policy_names): + ref_policies = list() + client = get_client() + + response = client.describe_load_balancer_policies( + LoadBalancerName=elb_name, + PolicyNames=policy_names + ) + + policies = response['PolicyDescriptions'] + for policy in policies: + if policy['PolicyTypeName'] == 'SSLNegotiationPolicyType': + for attribute in policy['PolicyAttributeDescriptions']: + if attribute['AttributeName'] == 'Reference-Security-Policy': + ref_policies.append(attribute['AttributeValue']) + return ref_policies + + +def get_reference_templates(elb_name): + client = get_client() + listener_descriptions = client.describe_load_balancers( + LoadBalancerNames=[ + elb_name, + ], + )['LoadBalancerDescriptions'][0]['ListenerDescriptions'] + reference_security_policies = list() + 
for listener_description in listener_descriptions: + if listener_description['Listener']['Protocol'] == 'HTTPS': + policy_names = listener_description['PolicyNames'] + elb_reference_policies = elb_ref_policy(elb_name, policy_names) + reference_security_policies.extend(elb_reference_policies) + return reference_security_policies + + +@click.command() +def show_available_policy_versions(): + list_of_valid_policy_names = get_tls_security_policy_template_names() + print_list('Available Policies: ', list_of_valid_policy_names) + + +@click.command() +def show_elb_policy_versions(): + print('\n Please be patient.. this may take a moment...\n\n') + elb_infos = get_elb_infos() + elbs_by_current_policy = {} + for elb_info in elb_infos: + elb_name = elb_info['LoadBalancerName'] + reference_templates = get_reference_templates(elb_name) + for reference_template in reference_templates: + if reference_template not in elbs_by_current_policy: + elbs_by_current_policy[reference_template] = [] + elbs_by_current_policy[reference_template].append(elb_name) + for policy_name in elbs_by_current_policy.keys(): + print_list(policy_name, elbs_by_current_policy[policy_name]) + print('\n\n') + + +@click.command() +@click.option('--policy_version', callback=check_valid_policy, + help='The TLS Policy version you would like to set', + required=True) +@click.option('--names', + required=False, + help=""" + Comma separated ELB names eg: + 'elb-name-app1,elb-name-app1'. 
+ This field is case sensitive.""") +@click.option('--port_override', + required=False, + default=None, + help=""" + Force the tls updater to only pay attention to a specific port + By default it will find the correct port and do the right thing + this only matters if you have multiple tls listeners on different + ports""") +@click.option('--confirm', default=False, required=False, is_flag=True, + help='Set this when you actually want to do the update.') +def update_elb_policies(confirm, policy_version, names, port_override): + elb_names = get_elb_names() + elb_names_to_update = [] + + if names is not None: + names = names.replace(' ', '').split(',') + for name in names: + if name in elb_names: + elb_names_to_update.append(name) + else: + raise Exception('You must specify names...') + + elb_names_to_update = set(elb_names_to_update) + + if confirm is False: + print('\n\nIf I actually ran the update this would be the result:\n') + + if confirm is False: + print_list(policy_version, elb_names_to_update) + print('\nAppend --confirm to actually perform the update\n') + else: + for elb_name in elb_names_to_update: + tls_policy_name = create_tls_policy(elb_name, policy_version) + print(("Trying to update...{0}".format(elb_name))) + client = get_client() + + # Determine which policies are actually active + # on the ELB on the 443 listener, + # as AWS has all policies that have + # ever been active on the ELB in their policies endpoint + elbs = client.describe_load_balancers( + LoadBalancerNames=[ + elb_name, + ], + )['LoadBalancerDescriptions'] + + load_balancer_descriptions = list() + for elb in elbs: + if(elb['LoadBalancerName'] == elb_name): + load_balancer_descriptions.append(elb) + + load_balancer_description = load_balancer_descriptions[0] + + listeners = load_balancer_description['ListenerDescriptions'] + + active_policy_names = list() + tls_port = None + for listener in listeners: + if((port_override is not None and listener['Listener']['LoadBalancerPort'] == 
int(port_override)) or (port_override is None and listener['Listener']['Protocol'] == 'HTTPS')): + tls_port = listener['Listener']['LoadBalancerPort'] + active_policy_names.extend(listener['PolicyNames']) + break + + if(tls_port is None and port_override is not None): + print("""Skipped updating this ELB because it does not have a listener + on the specified override port\n""") + continue + + # Now remove the active TLS related policy from that list, + # this requires querying a different endpoint + # as there is no way to know which policies are active + # from the following endpoint: + policies = client.describe_load_balancer_policies( + LoadBalancerName=elb_name + )['PolicyDescriptions'] + + # Make a new list containing the new TLS policy, + # and any previously active policies that are not TLS policies + + non_tls_policies = list() + + for policy in policies: + if policy['PolicyTypeName'] != 'SSLNegotiationPolicyType': + non_tls_policies.append(policy) + + non_tls_policy_names = list() + for non_tls_policy in non_tls_policies: + non_tls_policy_names.append(non_tls_policy['PolicyName']) + + non_tls_policies_on_listener = list() + + for policy_name in active_policy_names: + if(policy_name in non_tls_policy_names): + non_tls_policies_on_listener.append(policy_name) + + policy_names = non_tls_policies_on_listener + [tls_policy_name] + response = client.set_load_balancer_policies_of_listener( + LoadBalancerName=elb_name, + LoadBalancerPort=tls_port, + PolicyNames=policy_names + ) + print(("Updated {0}\n".format(elb_name))) + +cli.add_command(show_available_policy_versions) +cli.add_command(show_elb_policy_versions) +cli.add_command(update_elb_policies) + +if __name__ == '__main__': + cli() diff --git a/util/elb_tls_policy_management_util/examples b/util/elb_tls_policy_management_util/examples new file mode 100644 index 00000000000..205560c6ed4 --- /dev/null +++ b/util/elb_tls_policy_management_util/examples @@ -0,0 +1,20 @@ +# +# Print help +python 
elb_tls_policy_management_util.py --help + +### List available policy versions +python elb_tls_policy_management_util.py show_available_policy_versions + +### List current policy versions for all elbs in account +python elb_tls_policy_management_util.py show_elb_policy_versions + +### Update ELBs by name +python elb_tls_policy_management_util.py update_elb_policies --policy_version ELBSecurityPolicy-TLS-1-1-2017-01 --names 'elb-name-app1,elb-name-app2' + +### Handle multiple listeners and non standard TLS port listeners +The default behaviour here will find the first HTTPS listener and set its policy to the policy you specify while preserving any additional policies attached to that listener. + +ELBs with multiple TLS listeners will be listed more than once in show_elb_policy_versions, to deal with these you will have to explicitly set the port like so: +python elb_tls_policy_management_util.py update_elb_policies --policy_version ELBSecurityPolicy-TLS-1-1-2017-01 --names 'elb-name-app1,elb-name-app2' --port_override 443 + +You don't need to retarget when using the port override, as it will skip ELBs that don't have a listener on that port. \ No newline at end of file diff --git a/util/elb_tls_policy_management_util/requirements.txt b/util/elb_tls_policy_management_util/requirements.txt new file mode 100644 index 00000000000..a8ad9008830 --- /dev/null +++ b/util/elb_tls_policy_management_util/requirements.txt @@ -0,0 +1,8 @@ +boto3==1.4.8 +botocore==1.8.2 +click==6.7 +docutils==0.14 +jmespath==0.9.3 +python-dateutil==2.6.1 +s3transfer==0.1.11 +six==1.11.0 diff --git a/util/helm_values_to_rst_table_util/README.md b/util/helm_values_to_rst_table_util/README.md new file mode 100644 index 00000000000..28749900622 --- /dev/null +++ b/util/helm_values_to_rst_table_util/README.md @@ -0,0 +1,61 @@ + +Converts a helm chart's values.yaml file into an RST table.
+ +Example: + + + python helm_values_to_rst_table_util.py --values ../../../edx-notes-api/helmcharts/notes/values.yaml --subcharts mysql --subcharts elasticsearch + =================================================================================================== =================================================================================================== =================================================================================================== + Parameter Description Default + =================================================================================================== =================================================================================================== =================================================================================================== + app.replicaCount TODO 1 + app.image.repository TODO edxops/notes + app.image.tag TODO latest + app.image.pullPolicy TODO IfNotPresent + app.imagePullSecrets TODO [] + app.nameOverride TODO + app.fullnameOverride TODO + app.service.type TODO ClusterIP + app.service.port TODO 80 + app.ingress.enabled TODO False + app.ingress.hosts TODO [{'host': 'notes.local', 'paths': []}] + app.ingress.tls TODO [] + app.tolerations TODO [] + app.extraInitContainers TODO [] + app.config.ALLOWED_HOSTS TODO ['*'] + app.config.CLIENT_ID TODO + app.config.CLIENT_SECRET TODO + app.config.DATABASES.default.ENGINE TODO django.db.backends.mysql + app.config.DATABASES.default.HOST TODO notes-mysql + app.config.DATABASES.default.NAME TODO notes-db + app.config.DATABASES.default.OPTIONS.connect_timeout TODO 10 + app.config.DATABASES.default.PASSWORD TODO + app.config.DATABASES.default.PORT TODO 3306 + app.config.DATABASES.default.USER TODO notes-db-user + app.config.DISABLE_TOKEN_CHECK TODO False + app.config.ELASTICSEARCH_INDEX TODO edx_notes + app.config.ELASTICSEARCH_URL TODO http://notes-elasticsearch-client:9200 + app.config.HAYSTACK_CONNECTIONS.default.ENGINE TODO 
notesserver.highlight.ElasticsearchSearchEngine + app.config.HAYSTACK_CONNECTIONS.default.INDEX_NAME TODO notes + app.config.HAYSTACK_CONNECTIONS.default.URL TODO http://notes-elasticsearch-client:9200/ + app.config.JWT_AUTH.JWT_AUTH_COOKIE_HEADER_PAYLOAD TODO stage-edx-jwt-cookie-header-payload + app.config.JWT_AUTH.JWT_AUTH_COOKIE_SIGNATURE TODO stage-edx-jwt-cookie-signature + app.config.JWT_AUTH.JWT_AUTH_REFRESH_COOKIE TODO stage-edx-jwt-refresh-cookie + app.config.JWT_AUTH.JWT_ISSUERS TODO [] + app.config.JWT_AUTH.JWT_PUBLIC_SIGNING_JWK_SET TODO + app.config.RESULTS_DEFAULT_SIZE TODO 25 + app.config.RESULTS_MAX_SIZE TODO 250 + app.config.SECRET_KEY TODO + app.config.USERNAME_REPLACEMENT_WORKER TODO username_replacement_service_worker + app.config.LOG_SETTINGS_LOG_DIR TODO /var/tmp + app.config.LOG_SETTINGS_LOGGING_ENV TODO no_env + app.config.LOG_SETTINGS_DEV_ENV TODO True + app.config.LOG_SETTINGS_DEBUG TODO True + app.config.LOG_SETTINGS_LOCAL_LOGLEVEL TODO INFO + app.config.LOG_SETTINGS_EDX_FILENAME TODO edx.log + app.config.LOG_SETTINGS_SERVICE_VARIANT TODO edx-notes-api + elasticsearch.enabled TODO True + mysql.enabled TODO True + migrations.enabled TODO True + migrations.migrationContainerName TODO notes-migrations + =================================================================================================== =================================================================================================== =================================================================================================== \ No newline at end of file diff --git a/util/helm_values_to_rst_table_util/helm_values_to_rst_table_util.py b/util/helm_values_to_rst_table_util/helm_values_to_rst_table_util.py new file mode 100644 index 00000000000..a61d094b6c5 --- /dev/null +++ b/util/helm_values_to_rst_table_util/helm_values_to_rst_table_util.py @@ -0,0 +1,91 @@ +import click +import yaml + +@click.command() +@click.option('--values', help='Path to a values.yaml file', 
required=True) +@click.option('--subcharts', help='Sub chart values to ignore', multiple=True) +def cli(values, subcharts): + with open(values, 'r') as stream: + parsed_dict = yaml.safe_load(stream) + keys_from_yaml = collect_keys_from_yaml(parsed_dict, subcharts) + col_width = 99 + print_header(col_width) + for dot_format_key in keys_from_yaml: + value = extract_default_using_dot_key(dot_format_key, parsed_dict) + print_row(dot_format_key, value, col_width) + print_bar(col_width) + +def collect_keys_from_yaml(parsed_dict, subcharts): + aggregate = [] + outp = get_keys("", parsed_dict) + for entry in outp: + first_part_of_key = entry.split(".")[0] + + if first_part_of_key not in subcharts or entry.endswith(".enabled"): + aggregate.append(entry) + return aggregate + +def print_bar(col_width): + p1 = int(col_width) * "=" + p2 = int(col_width) * "=" + p3 = int(col_width) * "=" + print(f"{p1} {p2} {p3}") + +def print_header(col_width): + word1 = "Parameter" + num_spaces1 = col_width - len(word1) + num_spaces1 = num_spaces1 + 1 + spaces1 = " " * num_spaces1 + + word2 = "Description" + num_spaces2 = col_width - len(word2) + num_spaces2 = num_spaces2 + 1 + spaces2 = " " * num_spaces2 + + word3 = "Default" + num_spaces3 = col_width - len(word3) + spaces3 = " " * num_spaces3 + + print_bar(col_width) + print(f"{word1}{spaces1} {word2}{spaces2} {word3}{spaces3}") + print_bar(col_width) + +def print_row(dot_format_key, value, col_width): + space1 = (" " * (col_width - len(dot_format_key))) + space2 = (" " * (col_width - len(dot_format_key))) + space3 = " " * (len(dot_format_key) - 2) + print(f"{dot_format_key}{space1} TODO{space2}{space3}{value}") + +def get_keys(prefix, inp): + if isinstance(inp, dict): + aggregate = [] + for child_key in inp.keys(): + child = inp[child_key] + + if prefix is not "": + modified_prefix = prefix + "." 
+ else: + modified_prefix = prefix + + if isinstance(child, dict): + aggregate.append(get_keys(modified_prefix + child_key, child)) + else: + aggregate.append(modified_prefix + child_key) + return flatten(aggregate); + +def extract_default_using_dot_key(dot_format_key, parsed_dict): + key_parts = dot_format_key.split(".") + result = parsed_dict + for key_part in key_parts: + result = result[key_part] + return result + +def flatten(target): + if target == []: + return target + if isinstance(target[0], list): + return flatten(target[0]) + flatten(target[1:]) + return target[:1] + flatten(target[1:]) + +if __name__ == '__main__': + cli() \ No newline at end of file diff --git a/util/helm_values_to_rst_table_util/requirements3.txt b/util/helm_values_to_rst_table_util/requirements3.txt new file mode 100644 index 00000000000..dcae9c809c1 --- /dev/null +++ b/util/helm_values_to_rst_table_util/requirements3.txt @@ -0,0 +1,2 @@ +Click==7.0 +PyYAML==5.1.2 diff --git a/util/install/ansible-bootstrap.sh b/util/install/ansible-bootstrap.sh index 63336a1db40..f24458f44ab 100755 --- a/util/install/ansible-bootstrap.sh +++ b/util/install/ansible-bootstrap.sh @@ -1,10 +1,10 @@ #!/usr/bin/env bash # -# Script for installing Ansible and the edX configuration repostory +# Script for installing Ansible and the edX configuration repository # onto a host to enable running ansible to complete configuration. # This script can be used by Docker, Packer or any other system -# for building images that requires having ansible available. +# for building images that require having ansible available. 
# # Can be run as follows: # @@ -26,7 +26,7 @@ if [[ -z "${CONFIGURATION_REPO}" ]]; then fi if [[ -z "${CONFIGURATION_VERSION}" ]]; then - CONFIGURATION_VERSION="master" + CONFIGURATION_VERSION=${OPENEDX_RELEASE-master} fi if [[ -z "${UPGRADE_OS}" ]]; then @@ -40,9 +40,9 @@ fi # # Bootstrapping constants # -VIRTUAL_ENV_VERSION="15.2.0" -PIP_VERSION="9.0.3" -SETUPTOOLS_VERSION="39.0.1" +VIRTUAL_ENV_VERSION="16.7.10" +PIP_VERSION="20.0.2" +SETUPTOOLS_VERSION="44.1.0" VIRTUAL_ENV="/tmp/bootstrap" PYTHON_BIN="${VIRTUAL_ENV}/bin" ANSIBLE_DIR="/tmp/ansible" @@ -76,10 +76,13 @@ then elif grep -q 'Xenial Xerus' /etc/os-release then SHORT_DIST="xenial" +elif grep -q 'Bionic Beaver' /etc/os-release +then + SHORT_DIST="bionic" else cat << EOF - This script is only known to work on Ubuntu Trusty and Xenial, + This script is only known to work on Ubuntu Trusty, Xenial, and Bionic; exiting. If you are interested in helping make installation possible on other platforms, let us know. @@ -91,6 +94,12 @@ EDX_PPA="deb http://ppa.edx.org ${SHORT_DIST} main" # Upgrade the OS apt-get update -y + +# To apt-key update in bionic, gnupg is needed. +if [[ "${SHORT_DIST}" == bionic ]] ;then + apt-get install -y gnupg +fi + apt-key update -y if [ "${UPGRADE_OS}" = true ]; then @@ -99,21 +108,28 @@ if [ "${UPGRADE_OS}" = true ]; then fi # Required for add-apt-repository -apt-get install -y software-properties-common python-software-properties +apt-get install -y software-properties-common +if [[ "${SHORT_DIST}" != bionic ]] && [[ "${SHORT_DIST}" != xenial ]];then + apt-get install -y python-software-properties +fi # Add git PPA add-apt-repository -y ppa:git-core/ppa -# For older software we need to install our own PPA. 
-apt-key adv --keyserver "${EDX_PPA_KEY_SERVER}" --recv-keys "${EDX_PPA_KEY_ID}" -add-apt-repository -y "${EDX_PPA}" +# For older software we need to install our own PPA +# Phased out with Ubuntu 18.04 Bionic +if [[ "${SHORT_DIST}" != bionic ]] ;then + apt-key adv --keyserver "${EDX_PPA_KEY_SERVER}" --recv-keys "${EDX_PPA_KEY_ID}" + add-apt-repository -y "${EDX_PPA}" +fi + # Install python 2.7 latest, git and other common requirements # NOTE: This will install the latest version of python 2.7 and # which may differ from what is pinned in virtualenvironments apt-get update -y -apt-get install -y python2.7 python2.7-dev python-pip python-apt python-yaml python-jinja2 build-essential sudo git-core libmysqlclient-dev libffi-dev libssl-dev +apt-get install -y python2.7 python2.7-dev python-pip python-apt python-jinja2 build-essential sudo git-core libmysqlclient-dev libffi-dev libssl-dev pip install --upgrade pip=="${PIP_VERSION}" @@ -160,4 +176,3 @@ else mkdir -p /edx/ansible/facts.d echo '{ "ansible_bootstrap_run": true }' > /edx/ansible/facts.d/ansible_bootstrap.json fi - diff --git a/util/install/generate-passwords.sh b/util/install/generate-passwords.sh index 417a7e9c39e..73f4f36dc12 100755 --- a/util/install/generate-passwords.sh +++ b/util/install/generate-passwords.sh @@ -3,7 +3,7 @@ # Read a list of Ansible variables that should have generated values, and make # a new file just like it, with the generated values. -TARGET=${OPENEDX_RELEASE-master} +TARGET=${CONFIGURATION_VERSION-${OPENEDX_RELEASE-master}} wget -q "https://raw.githubusercontent.com/edx/configuration/$TARGET/playbooks/sample_vars/passwords.yml" -O passwords-template.yml while IFS= read -r line; do diff --git a/util/install/install_stack.sh b/util/install/install_stack.sh deleted file mode 100755 index 814a4ba9e9c..00000000000 --- a/util/install/install_stack.sh +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/env bash - -# Setting OPENEDX_DEBUG makes this more verbose. 
-if [[ $OPENEDX_DEBUG ]]; then - set -x -fi - -# Stop if any command fails. -set -e - -function usage -{ - cat << EOM - - Usage: $ bash ${0##*/} [-b mount_base] [-v] [-h] STACK [RELEASE] - - Installs the Open edX devstack or fullstack. If you encounter any trouble - or have questions, head over to https://open.edx.org/getting-help. - - This script captures a log of all output produced during runtime, and saves - it in a .log file within the current directory. If you encounter an error - during installation, this is an invaluable tool for edX developers to help - discover what went wrong, so please share it if you reach out for support! - - NOTE: This script assumes you have never installed devstack before. - Installing multiple versions of devstack can often cause conflicts that - this script is not prepared to handle. - - STACK - Either 'fullstack' or 'devstack'. Fullstack mimics a production - environment, whereas devstack is useful if you plan on modifying the - Open edX code. You must specify this. - - If you choose fullstack, 'release' should be the latest Open edX - release. - - If you choose devstack, 'release' should be the latest Open edX - release or master. - - RELEASE - The release of Open edX to install. Defaults to \$OPENEDX_RELEASE. - Open edX releases are called "open-release/eucalyptus.1", - "open-release/eucalyptus.2", and so on. - - We recommend the latest stable open release for general members of the - open source community. Details on available open releases can be found - at: https://openedx.atlassian.net/wiki/display/DOC/Open+edX+Releases. - - If you plan on modifying the code, we recommend the "master" branch. - - -b mount_base - Customize the location of the source code that gets cloned during the - devstack provisioning. The default is the current directory. This - option is not valid if installing fullstack. - - -v - Verbose output from ansible playbooks. - - -h - Show this help and exit. 
- -EOM -} - - -ERROR='\033[0;31m' # Red -WARN='\033[1;33m' # Yellow -SUCCESS='\033[0;32m' # Green -NC='\033[0m' # No Color - -# Output verbosity -verbosity=0 -# OPENEDX_RELEASE -release="" -# Vagrant source code provision location -vagrant_mount_location="" - -while getopts "b:vh" opt; do - case "$opt" in - b) - if [[ $stack == "devstack" ]]; then - vagrant_mount_location=$OPTARG - else - echo -e "${ERROR}Fullstack has no mount location. The -b option is not valid for fullstack!${NC}" - exit 1 - fi - ;; - v) - verbosity=1 - ;; - h) - usage - exit - ;; - *) - usage - exit 1 - ;; - esac -done - -shift "$((OPTIND-1))" # Shift off the options we've already parsed - -# STACK is a required positional argument. -if [[ ! $1 ]]; then - echo "STACK is required" - usage - exit 1 -fi -stack=$1 -shift - -# RELEASE is an optional positional argument, defaulting to OPENEDX_RELEASE. -if [[ $1 ]]; then - release=$1 - shift -else - release=$OPENEDX_RELEASE -fi - -if [[ ! $release ]]; then - echo "You must specify RELEASE, or define OPENEDX_RELEASE before running." - exit 1 -fi - -# If there are positional arguments left, something is wrong. -if [[ $1 ]]; then - echo "Don't understand extra arguments: $*" - usage - exit 1 -fi - -mkdir -p logs -log_file=logs/install-$(date +%Y%m%d-%H%M%S).log -exec > >(tee $log_file) 2>&1 -echo "Capturing output to $log_file" -echo "Installation started at $(date '+%Y-%m-%d %H:%M:%S')" - -function finish { - echo "Installation finished at $(date '+%Y-%m-%d %H:%M:%S')" -} -trap finish EXIT - -export OPENEDX_RELEASE=$release -echo "Installing release '$OPENEDX_RELEASE'" - -# Check if mount location was changed -if [[ $vagrant_mount_location != "" ]]; then - echo "Changing Vagrant provision location to $vagrant_mount_location..." - export VAGRANT_MOUNT_BASE=$vagrant_mount_location -fi - -if [[ -d .vagrant ]]; then - echo -e "${ERROR}A .vagrant directory already exists here. 
If you already tried installing $stack, make sure to vagrant destroy the $stack machine and 'rm -rf .vagrant' before trying to reinstall. If you would like to install a separate $stack, change to a different directory and try running the script again.${NC}" - exit 1 -fi - -if [[ $stack == "devstack" ]]; then # Install devstack - # Warn if release chosen is not master or open-release (Eucalyptus and up) - if [[ $release != "master" && $release != "open-release"* ]]; then - echo -e "${WARN}The release you entered is not 'master' or an open-release. Please be aware that a branch other than master or a release other than the latest open-release could cause errors when installing $stack.${NC}" | fmt - fi - - wiki_link="https://openedx.atlassian.net/wiki/display/OpenOPS/Running+Devstack" - curl -fOL# https://raw.githubusercontent.com/edx/configuration/${OPENEDX_RELEASE}/vagrant/release/devstack/Vagrantfile - vagrant plugin install vagrant-vbguest -elif [[ $stack == "fullstack" ]]; then # Install fullstack - # Warn if release chosen is not open-release (Eucalyptus and up) - if [[ $release != "open-release"* ]]; then - echo -e "${WARN}The release you entered is not an open-release. Please be aware that a branch other than the latest open-release could cause errors when installing $stack.${NC}" - fi - - wiki_link="https://openedx.atlassian.net/wiki/display/OpenOPS/Running+Fullstack" - curl -fOL# https://raw.githubusercontent.com/edx/configuration/${OPENEDX_RELEASE}/vagrant/release/fullstack/Vagrantfile - vagrant plugin install vagrant-hostsupdater -else # Throw error - echo -e "${ERROR}Unrecognized stack name, must be either devstack or fullstack!${NC}" - exit 1 -fi - -# Check for verbosity level -if [[ $verbosity == 1 ]]; then - sed -i '' 's/-e xqueue_version=\$OPENEDX_RELEASE/-e xqueue_version=\$OPENEDX_RELEASE \\\'$'\n -vvv/' Vagrantfile -fi - -vagrant up --provider virtualbox - -# Set preview host. 
-if grep -q '192.168.33.10 preview.localhost' /etc/hosts; then - echo "Studio preview already enabled, skipping..." -else - echo "Enabling use of preview within Studio..." - sudo bash -c "echo '192.168.33.10 preview.localhost' >> /etc/hosts" -fi - -echo -e "${SUCCESS}Finished installing! You may now log in using 'vagrant ssh'" -echo -e "Refer to the edX wiki ($wiki_link) for more information on using $stack.${NC}" diff --git a/util/install/native.sh b/util/install/native.sh index f83272a60f7..68b3cf3d429 100644 --- a/util/install/native.sh +++ b/util/install/native.sh @@ -22,12 +22,57 @@ if [[ `lsb_release -rs` != "16.04" ]]; then exit fi +# Config.yml is required, must define LMS and CMS names, and the names +# must not infringe trademarks. + +if [[ ! -f config.yml ]]; then + echo 'You must create a config.yml file specifying the hostnames (and if' + echo 'needed, ports) of your LMS and Studio hosts.' + echo 'For example:' + echo ' EDXAPP_LMS_BASE: "11.22.33.44"' + echo ' EDXAPP_CMS_BASE: "11.22.33.44:18010"' + exit +fi + +grep -Fq EDXAPP_LMS_BASE config.yml +GREP_LMS=$? + +grep -Fq EDXAPP_CMS_BASE config.yml +GREP_CMS=$? + +if [[ $GREP_LMS == 1 ]] || [[ $GREP_CMS == 1 ]]; then + echo 'Your config.yml file must specify the hostnames (and if' + echo 'needed, ports) of your LMS and Studio hosts.' + echo 'For example:' + echo ' EDXAPP_LMS_BASE: "11.22.33.44"' + echo ' EDXAPP_CMS_BASE: "11.22.33.44:18010"' + exit +fi + +grep -Fq edx. config.yml +GREP_BAD_DOMAIN=$? + +if [[ $GREP_BAD_DOMAIN == 0 ]]; then + echo '*** NOTE: Open edX and edX are registered trademarks.' + echo 'You may not use "openedx." or "edx." as subdomains when naming your site.' 
+ echo 'For more details, see the edX Trademark Policy: https://edx.org/trademarks' + echo '' + echo 'Here are some examples of unacceptable domain names:' + echo ' openedx.yourdomain.org' + echo ' edx.yourdomain.org' + echo ' openedxyourdomain.org' + echo ' yourdomain-edx.com' + echo '' + echo 'Please choose different domain names.' + exit +fi + ## ## Log what's happening ## mkdir -p logs -log_file=logs/install-$(date +%Y%m%d-%H%M%S).log +log_file=$(realpath logs/install-$(date +%Y%m%d-%H%M%S).log) exec > >(tee $log_file) 2>&1 echo "Capturing output to $log_file" echo "Installation started at $(date '+%Y-%m-%d %H:%M:%S')" @@ -55,9 +100,11 @@ sudo apt-get upgrade -y ## Install system pre-requisites ## sudo apt-get install -y build-essential software-properties-common curl git-core libxml2-dev libxslt1-dev python-pip libmysqlclient-dev python-apt python-dev libxmlsec1-dev libfreetype6-dev swig gcc g++ -sudo pip install --upgrade pip==9.0.3 -sudo pip install --upgrade setuptools==39.0.1 -sudo -H pip install --upgrade virtualenv==15.2.0 +# ansible-bootstrap installs yaml that pip 19 can't uninstall. +sudo apt-get remove -y python-yaml +sudo pip install --upgrade pip==20.0.2 +sudo pip install --upgrade setuptools==44.1.0 +sudo -H pip install --upgrade virtualenv==16.7.10 ## ## Overridable version variables in the playbooks. 
Each can be overridden @@ -67,7 +114,7 @@ VERSION_VARS=( edx_platform_version certs_version forum_version - xqueue_version + XQUEUE_VERSION configuration_version demo_version NOTIFIER_VERSION @@ -94,6 +141,8 @@ if [[ -f my-passwords.yml ]]; then EXTRA_VARS="-e@$(pwd)/my-passwords.yml $EXTRA_VARS" fi +EXTRA_VARS="-e@$(pwd)/config.yml $EXTRA_VARS" + CONFIGURATION_VERSION=${CONFIGURATION_VERSION-$OPENEDX_RELEASE} ## @@ -112,21 +161,29 @@ cd /var/tmp/configuration sudo -H pip install -r requirements.txt ## -## Run the edx_sandbox.yml playbook in the configuration/playbooks directory +## Run the openedx_native.yml playbook in the configuration/playbooks directory ## -cd /var/tmp/configuration/playbooks && sudo -E ansible-playbook -c local ./edx_sandbox.yml -i "localhost," $EXTRA_VARS "$@" +cd /var/tmp/configuration/playbooks && sudo -E ansible-playbook -c local ./openedx_native.yml -i "localhost," $EXTRA_VARS "$@" ansible_status=$? if [[ $ansible_status -ne 0 ]]; then echo " " - echo "========================================" + echo "============================================================" echo "Ansible failed!" - echo "----------------------------------------" + echo "------------------------------------------------------------" + echo " " + echo "Decoded error:" + # Find the FAILED line before the "to retry," line, and decode it. + awk '/to +retry,/{if (bad) print bad} /FAILED/{bad=$0}' $log_file | python3 /var/tmp/configuration/util/ansible_msg.py + echo " " + echo "============================================================" + echo "Installation failed!" + echo "------------------------------------------------------------" echo "If you need help, see https://open.edx.org/getting-help ." echo "When asking for help, please provide as much information as you can." 
echo "These might be helpful:" echo " Your log file is at $log_file" echo " Your environment:" env | egrep -i 'version|release' | sed -e 's/^/ /' - echo "========================================" + echo "============================================================" fi diff --git a/util/install/sandbox.sh b/util/install/sandbox.sh old mode 100644 new mode 100755 diff --git a/util/jenkins/add_new_xqueues_to_dashboard/__init__.py b/util/jenkins/add_new_xqueues_to_dashboard/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/util/jenkins/add_new_xqueues_to_dashboard/add_xqueue_to_dashboard.py b/util/jenkins/add_new_xqueues_to_dashboard/add_xqueue_to_dashboard.py new file mode 100644 index 00000000000..dddbfcc41a2 --- /dev/null +++ b/util/jenkins/add_new_xqueues_to_dashboard/add_xqueue_to_dashboard.py @@ -0,0 +1,129 @@ +from __future__ import absolute_import +from __future__ import print_function +import pprint +import re + +import boto3 +import botocore +import backoff +import click +import json + +MAX_TRIES = 1 + +class CwBotoWrapper(object): + def __init__(self): + self.client = boto3.client('cloudwatch') + + @backoff.on_exception(backoff.expo, + (botocore.exceptions.ClientError), + max_tries=MAX_TRIES) + def list_metrics(self, *args, **kwargs): + return self.client.list_metrics(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + (botocore.exceptions.ClientError), + max_tries=MAX_TRIES) + def put_dashboard(self, *args, **kwargs): + return self.client.put_dashboard(*args, **kwargs) + + +def generate_dashboard_widget_metrics( + cloudwatch, + namespace, + metric_name, + dimension_name, + properties={}, + right_axis_items=[] +): + pp = pprint.PrettyPrinter(indent=4) + + metrics = cloudwatch.list_metrics( + Namespace=namespace, MetricName=metric_name, Dimensions=[{"Name": dimension_name}] + ) + + values = [] + + for metric in metrics['Metrics']: + for dimension in metric['Dimensions']: + if dimension['Name'] == dimension_name: + 
values.append(dimension['Value']) + + values.sort() + + new_widget_metrics = [] + for value in values: + value_properties = properties.copy() + value_properties['label'] = value + if value in right_axis_items: + value_properties["yAxis"] = "right" + new_widget_metrics.append([namespace, metric_name, dimension_name, value, value_properties]) + + return new_widget_metrics + + +# * means that all arguments after cloudwatch are keyword arguments only and are not positional +def generate_dashboard_widget( + cloudwatch, + *, + x=0, + y, + title, + namespace, + metric_name, + dimension_name, + metrics_properties={}, + height, + width=24, + stacked=False, + region='us-east-1', + period=60, + right_axis_items=[] +): + return {'type': 'metric', 'height': height, 'width': width, 'x': x, 'y': y, + 'properties': { + 'period': period, 'view': 'timeSeries', 'stacked': stacked, 'region': region, + 'title': "{} (auto-generated)".format(title), + 'metrics': generate_dashboard_widget_metrics(cloudwatch, namespace, metric_name, dimension_name, + metrics_properties, right_axis_items=right_axis_items) + } + } + + +@click.command() +@click.option('--environment', '-e', required=True) +@click.option('--deploy', '-d', required=True, + help="Deployment (i.e. 
edx or stage)") +def generate_dashboard(environment, deploy): + pp = pprint.PrettyPrinter(indent=4) + cloudwatch = CwBotoWrapper() + + dashboard_name = "{}-{}-xqueues".format(environment, deploy) + xqueue_namespace = "xqueue/{}-{}".format(environment, deploy) + + widgets = [] + y_cord = 0 + height = 9 + + if deploy == 'edx' and environment == 'prod': + y_cord += height + height = 9 + + widgets.append(generate_dashboard_widget(cloudwatch, y=y_cord, height=height, + title="{}-{} Xqueue Queues".format(environment, deploy), + namespace=xqueue_namespace, metric_name="queue_length", + dimension_name="queue", + ) + ) + + dashboard_body = {'widgets': widgets} + + print("Dashboard Body") + pp.pprint(dashboard_body) + + cloudwatch.put_dashboard(DashboardName=dashboard_name, + DashboardBody=json.dumps(dashboard_body)) + + +if __name__ == '__main__': + generate_dashboard() diff --git a/util/jenkins/add_new_xqueues_to_dashboard/requirements.txt b/util/jenkins/add_new_xqueues_to_dashboard/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/add_new_xqueues_to_dashboard/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/ansible-provision.sh b/util/jenkins/ansible-provision.sh index 45bc202a4e7..23e2197923e 100644 --- a/util/jenkins/ansible-provision.sh +++ b/util/jenkins/ansible-provision.sh @@ -89,11 +89,12 @@ extra_vars_file="/var/tmp/extra-vars-$$.yml" sandbox_secure_vars_file="${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml" sandbox_internal_vars_file="${WORKSPACE}/configuration-internal/ansible/vars/developer-sandbox.yml" extra_var_arg="-e@${extra_vars_file}" +program_manager="false" if [[ $edx_internal == "true" ]]; then # if this is a an edx server include # the secret var file - extra_var_arg="-e@${sandbox_internal_vars_file} -e@${sandbox_secure_vars_file} -e@${extra_vars_file}" + extra_var_arg="-e@${sandbox_internal_vars_file} 
-e@${sandbox_secure_vars_file} -e@${extra_vars_file} -e DECRYPT_CONFIG_PRIVATE_KEY=$WORKSPACE/configuration-secure/ansible/keys/sandbox-remote-config/sandbox/private.key -e ENCRYPTED_CFG_DIR=$WORKSPACE/configuration-internal/sandbox-remote-config/sandbox -e UNENCRYPTED_CFG_DIR=$WORKSPACE" fi if [[ -z $region ]]; then @@ -127,14 +128,16 @@ fi if [[ -z $ami ]]; then if [[ $server_type == "full_edx_installation" ]]; then - ami="ami-c14d38be" + ami="ami-0d7c5de485513e2dd" + elif [[ $server_type == "ubuntu_18.04" ]]; then + ami="ami-07ebfd5b3428b6f4d" elif [[ $server_type == "ubuntu_16.04" || $server_type == "full_edx_installation_from_scratch" ]]; then - ami="ami-a4dc46db" + ami="ami-092546daafcc8bc0d" fi fi if [[ -z $instance_type ]]; then - instance_type="t2.large" + instance_type="r5.large" fi if [[ -z $instance_initiated_shutdown_behavior ]]; then @@ -173,12 +176,24 @@ if [[ -z $set_whitelabel ]]; then set_whitelabel="true" fi -if [[ -z $journals ]]; then - journals="false" +if [[ -z $registrar ]]; then + registrar="false" fi -if [[ -z $journals_version ]]; then - journals_version="master" +if [[ -z $registrar_version ]]; then + registrar_version="master" +fi + +if [[ -z $learner_portal ]]; then + learner_portal="false" +fi + +if [[ -z $learner_portal_version ]]; then + learner_portal_version="master" +fi + +if [[ $registrar == 'true' ]]; then + program_manager="true" fi @@ -195,17 +210,20 @@ edx_platform_version: $edxapp_version forum_version: $forum_version notifier_version: $notifier_version XQUEUE_VERSION: $xqueue_version -xserver_version: $xserver_version certs_version: $certs_version configuration_version: $configuration_version demo_version: $demo_version THEMES_VERSION: $themes_version -journals_version: $journals_version +registrar_version: $registrar_version +learner_portal_version: $learner_portal_version +program_manager_version: $program_manager_version edx_ansible_source_repo: ${configuration_source_repo} edx_platform_repo: ${edx_platform_repo} 
EDXAPP_PLATFORM_NAME: $sandbox_platform_name +SANDBOX_CONFIG: True +CONFIGURE_JWTS: True EDXAPP_STATIC_URL_BASE: $static_url_base EDXAPP_LMS_NGINX_PORT: 80 @@ -223,10 +241,22 @@ ANALYTICS_API_NGINX_PORT: 80 ANALYTICS_API_SSL_NGINX_PORT: 443 ANALYTICS_API_VERSION: $analytics_api_version -JOURNALS_NGINX_PORT: 80 -JOURNALS_SSL_NGINX_PORT: 443 -JOURNALS_VERSION: $journals_version -JOURNALS_ENABLED: $journals +REGISTRAR_NGINX_PORT: 80 +REGISTRAR_SSL_NGINX_PORT: 443 +REGISTRAR_VERSION: $registrar_version +REGISTRAR_ENABLED: $registrar + +LEARNER_PORTAL_NGINX_PORT: 80 +LEARNER_PORTAL_SSL_NGINX_PORT: 443 +LEARNER_PORTAL_VERSION: $learner_portal_version +LEARNER_PORTAL_ENABLED: $learner_portal +LEARNER_PORTAL_SANDBOX_BUILD: True + +PROGRAM_MANAGER_NGINX_PORT: 80 +PROGRAM_MANAGER_SSL_NGINX_PORT: 443 +PROGRAM_MANAGER_VERSION: $program_manager_version +PROGRAM_MANAGER_ENABLED: $program_manager +PROGRAM_MANAGER_SANDBOX_BUILD: True VIDEO_PIPELINE_BASE_NGINX_PORT: 80 VIDEO_PIPELINE_BASE_SSL_NGINX_PORT: 443 @@ -240,8 +270,10 @@ EDX_ANSIBLE_DUMP_VARS: true migrate_db: "yes" dns_name: $dns_name COMMON_HOSTNAME: $dns_name +COMMON_DEPLOY_HOSTNAME: ${deploy_host} COMMON_DEPLOYMENT: edx COMMON_ENVIRONMENT: sandbox +COMMON_LMS_BASE_URL: https://${deploy_host} nginx_default_sites: - lms @@ -280,9 +312,6 @@ EOF_PROFILING fi if [[ $edx_internal == "true" ]]; then - # if this isn't a public server add the github - # user and set edx_internal to True so that - # xserver is installed cat << EOF >> $extra_vars_file EDXAPP_PREVIEW_LMS_BASE: preview-${deploy_host} EDXAPP_LMS_BASE: ${deploy_host} @@ -325,21 +354,24 @@ ECOMMERCE_LMS_URL_ROOT: "https://${deploy_host}" ECOMMERCE_SOCIAL_AUTH_REDIRECT_IS_HTTPS: true ecommerce_create_demo_data: true -JOURNALS_URL_ROOT: "https://journals-{{ EDXAPP_LMS_BASE }}" -JOURNALS_API_URL: "https://journals-{{ EDXAPP_LMS_BASE }}/api/v1/" -JOURNALS_DISCOVERY_SERVICE_URL: "https://discovery-{{ EDXAPP_LMS_BASE }}" -JOURNALS_LMS_URL_ROOT: "https://{{ EDXAPP_LMS_BASE 
}}" -JOURNALS_SOCIAL_AUTH_REDIRECT_IS_HTTPS: true -JOURNALS_DISCOVERY_API_URL: "{{ JOURNALS_DISCOVERY_SERVICE_URL }}/api/v1/" -JOURNALS_DISCOVERY_JOURNALS_API_URL: "{{ JOURNALS_DISCOVERY_SERVICE_URL }}/journal/api/v1/" -JOURNALS_ECOMMERCE_BASE_URL: "{{ ECOMMERCE_ECOMMERCE_URL_ROOT }}" -JOURNALS_ECOMMERCE_API_URL: "{{ JOURNALS_ECOMMERCE_BASE_URL }}/api/v2/" -JOURNALS_ECOMMERCE_JOURNALS_API_URL: "{{ JOURNALS_ECOMMERCE_BASE_URL }}/journal/api/v1" -journals_create_demo_data: true - DISCOVERY_URL_ROOT: "https://discovery-${deploy_host}" DISCOVERY_SOCIAL_AUTH_REDIRECT_IS_HTTPS: true +REGISTRAR_URL_ROOT: "https://registrar-${deploy_host}" +REGISTRAR_API_ROOT: "https://registrar-${deploy_host}/api" +REGISTRAR_DISCOVERY_BASE_URL: "https://discovery-${deploy_host}" +REGISTRAR_LMS_BASE_URL: "https://${deploy_host}" +REGISTRAR_SOCIAL_AUTH_REDIRECT_IS_HTTPS: true + +LEARNER_PORTAL_URL_ROOT: "https://learner-portal-${deploy_host}" +LEARNER_PORTAL_DISCOVERY_BASE_URL: "https://discovery-${deploy_host}" +LEARNER_PORTAL_LMS_BASE_URL: "https://${deploy_host}" + +PROGRAM_MANAGER_URL_ROOT: "https://program-manager-${deploy_host}" +PROGRAM_MANAGER_DISCOVERY_BASE_URL: "https://discovery-${deploy_host}" +PROGRAM_MANAGER_LMS_BASE_URL: "https://${deploy_host}" +PROGRAM_MANAGER_REGISTRAR_API_BASE_URL: "https://registrar-${deploy_host}/api" + credentials_create_demo_data: true CREDENTIALS_LMS_URL_ROOT: "https://${deploy_host}" CREDENTIALS_DOMAIN: "credentials-${deploy_host}" @@ -358,6 +390,17 @@ VEDA_ENCODE_WORKER_VERSION: ${video_encode_worker_version:-master} EOF fi +encrypted_config_apps=(edxapp ecommerce ecommerce_worker analytics_api insights discovery credentials registrar edx_notes_api) + +for app in ${encrypted_config_apps[@]}; do + eval app_decrypt_and_copy_config_enabled=\${${app}_decrypt_and_copy_config_enabled} + if [[ ${app_decrypt_and_copy_config_enabled} == "true" ]]; then + cat << EOF >> $extra_vars_file +${app^^}_DECRYPT_CONFIG_ENABLED: true +${app^^}_COPY_CONFIG_ENABLED: 
true +EOF + fi +done if [[ $recreate == "true" ]]; then # vars specific to provisioning added to $extra-vars @@ -405,8 +448,16 @@ veda_pipeline_worker=${video_pipeline:-false} veda_encode_worker=${video_encode_worker:-false} video_pipeline_integration=${video_pipeline:-false} +# ansible overrides for master's integration environment setup +if [[ $registrar == "true" ]]; then + cat << EOF >> $extra_vars_file +COMMON_ENABLE_SPLUNKFORWARDER: true, +EDXAPP_ENABLE_ENROLLMENT_RESET: true, +EOF +fi + declare -A deploy -plays="edxapp forum ecommerce credentials discovery journals analyticsapi veda_web_frontend veda_pipeline_worker veda_encode_worker video_pipeline_integration notifier xqueue xserver certs demo testcourses" +plays="edxapp forum ecommerce credentials discovery analyticsapi veda_web_frontend veda_pipeline_worker veda_encode_worker video_pipeline_integration notifier xqueue certs demo testcourses registrar program_manager learner_portal" for play in $plays; do deploy[$play]=${!play} @@ -449,13 +500,25 @@ fi # set the hostname run_ansible set_hostname.yml -i "${deploy_host}," -e hostname_fqdn=${deploy_host} --user ubuntu +# master's integration environment setup +if [[ $registrar == "true" ]]; then + # vars specific to master's integration environment + cat << EOF >> $extra_vars_file +username: $registrar_user_email +email: $registrar_user_email +organization_key: $registrar_org_key +registrar_role: "organization_read_write_enrollments" +EOF + run_ansible masters_sandbox.yml -i "${deploy_host}," $extra_var_arg --user ubuntu +fi + if [[ $set_whitelabel == "true" ]]; then # Setup Whitelabel themes run_ansible whitelabel.yml -i "${deploy_host}," $extra_var_arg --user ubuntu fi if [[ $enable_newrelic == "true" ]]; then - run_ansible ../run_role.yml -i "${deploy_host}," -e role=newrelic_infrastructure $extra_var_arg --user ubuntu + run_ansible run_role.yml -i "${deploy_host}," -e role=newrelic_infrastructure $extra_var_arg --user ubuntu fi rm -f "$extra_vars_file" 
diff --git a/util/jenkins/assume-role.sh b/util/jenkins/assume-role.sh index 9087c02ca9a..efa528c7edc 100644 --- a/util/jenkins/assume-role.sh +++ b/util/jenkins/assume-role.sh @@ -25,3 +25,9 @@ assume-role() { set -x } +unassume-role () { + unset AWS_ACCESS_KEY_ID + unset AWS_SECRET_ACCESS_KEY + unset AWS_SECURITY_TOKEN + unset AWS_SESSION_TOKEN +} diff --git a/util/jenkins/build-ami.sh b/util/jenkins/build-ami.sh deleted file mode 100755 index 11c5b67b9c0..00000000000 --- a/util/jenkins/build-ami.sh +++ /dev/null @@ -1,142 +0,0 @@ -#!/bin/bash -x -# This script is meant to be run from jenkins and expects the -# following variables to be set: -# - BUILD_ID - set by jenkins, Unique ID of build -# - BUILD_NUMBER - set by jenkins, Build number -# - refs - repo revisions to pass to abbey. This is provided in YAML syntax, -# and we put the contents in a file that abbey reads. Refs are -# different from 'vars' in that each ref is set as a tag on the -# output AMI. -# - vars - other vars to pass to abbey. This is provided in YAML syntax, -# and we put the contents in a file that abby reads. -# - deployment - edx, edge, etc -# - environment - stage,prod, etc -# - play - forum, edxapp, xqueue, etc -# - base_ami - Optional AMI to use as base AMI for abby instance -# - configuration - the version of the configuration repo to use -# - configuration_secure - the version of the secure repo to use -# - jenkins_admin_ec2_key - location of the ec2 key to pass to abbey -# - jenkins_admin_configuration_secure_repo - the git repo to use for secure vars -# - use_blessed - whether or not to use blessed AMIs - -if [[ -z "$BUILD_ID" ]]; then - echo "BUILD_ID not specified." - exit -1 -fi - -if [[ -z "$BUILD_NUMBER" ]]; then - echo "BUILD_NUMBER not specified." - exit -1 -fi - -if [[ -z "$deployment" ]]; then - echo "deployment not specified." - exit -1 -fi - -if [[ -z "$environment" ]]; then - echo "environment not specified." 
- exit -1 -fi - -if [[ -z "$play" ]]; then - echo "play not specified." - exit -1 -fi - -if [[ -z "$jenkins_admin_ec2_key" ]]; then - echo "jenkins_admin_ec2_key not specified." - exit -1 -fi - -if [[ -z "$jenkins_admin_configuration_secure_repo" ]]; then - echo "jenkins_admin_configuration_secure_repo not specified." - exit -1 -fi - -export PYTHONUNBUFFERED=1 - -cd $WORKSPACE/configuration -configuration=`git rev-parse --short HEAD` -cd $WORKSPACE - -cd $WORKSPACE/configuration-secure -configuration_secure=`git rev-parse --short HEAD` -cd $WORKSPACE - -base_params="" -if [[ -n "$base_ami" ]]; then - base_params="-b $base_ami" -fi - -blessed_params="" -if [[ "$use_blessed" == "true" ]]; then - blessed_params="--blessed" -fi - -if [[ -e "configuration/playbooks/${play}.yml" ]]; then - playbookdir_params="--playbook-dir configuration/playbooks" -else - playbookdir_params="--playbook-dir ansible-private" -fi - -configurationprivate_params="" -if [[ ! -z "$configurationprivaterepo" ]]; then - configurationprivate_params="--configuration-private-repo $configurationprivaterepo" - if [[ ! -z "$configurationprivateversion" ]]; then - configurationprivate_params="$configurationprivate_params --configuration-private-version $configurationprivateversion" - fi -fi - -hipchat_params="" -if [[ ! -z "$hipchat_room_id" ]] && [[ ! -z "$hipchat_api_token" ]]; then - hipchat_params="--hipchat-room-id $hipchat_room_id --hipchat-api-token $hipchat_api_token" -fi - -datadog_params="" -if [[ ! -z "$DATADOG_API_KEY" ]]; then - datadog_params="--datadog-api-key $DATADOG_API_KEY" -fi - -cleanup_params="" -if [[ "$cleanup" == "false" ]]; then - cleanup_params="--no-cleanup" -fi -notification_params="" -if [[ ! -z "$callback_url" ]]; then - if [[ ! -z "$jobid" ]]; then - notification_params="--callback-url $callback_url$jobid" - curl "$callback_url$jobid/starting%20ansible" - fi -fi - -region_params="" -if [[ ! 
-z "$region" ]]; then - region_params="--region $region" -fi - -identity_params="--identity /edx/var/jenkins/.ssh/id_rsa" -if [[ ! -z "$identity_path" ]]; then - identity_params="--identity $identity_path" -fi - -cd configuration -pip install -r requirements.txt - -cd util/vpc-tools/ - -echo "$vars" > /var/tmp/$BUILD_ID-extra-vars.yml -cat /var/tmp/$BUILD_ID-extra-vars.yml - -configuration_internal_var="configuration_internal_version" -configurationinternalversion=$(grep "$configuration_internal_var" "/var/tmp/$BUILD_ID-extra-vars.yml" | awk -F: '{print $2}') - -configurationinternal_params="" -if [[ ! -z "$configurationinternalrepo" ]]; then - configurationinternal_params="--configuration-internal-repo $configurationinternalrepo" - if [[ ! -z "$configurationinternalversion" ]]; then - configurationinternal_params="$configurationinternal_params --configuration-internal-version $configurationinternalversion" - fi -fi - -python -u abbey.py -p $play -t m3.large -d $deployment -e $environment $base_params $blessed_params $playbookdir_params --vars /var/tmp/$BUILD_ID-extra-vars.yml -c $BUILD_NUMBER --configuration-version $configuration --configuration-secure-version $configuration_secure -k $jenkins_admin_ec2_key --configuration-secure-repo $jenkins_admin_configuration_secure_repo $configurationprivate_params $configurationinternal_params $hipchat_params $cleanup_params $notification_params $datadog_params $region_params $identity_params diff --git a/util/jenkins/check-celery-queues.py b/util/jenkins/check-celery-queues.py deleted file mode 100644 index 382bc1ef011..00000000000 --- a/util/jenkins/check-celery-queues.py +++ /dev/null @@ -1,167 +0,0 @@ -import redis -import click -import boto3 -import botocore -import backoff -from itertools import zip_longest - -max_tries = 5 - - -class RedisWrapper(object): - def __init__(self, *args, **kwargs): - self.redis = redis.StrictRedis(*args, **kwargs) - - @backoff.on_exception(backoff.expo, - (redis.exceptions.TimeoutError, - 
redis.exceptions.ConnectionError), - max_tries=max_tries) - def keys(self): - return self.redis.keys() - - @backoff.on_exception(backoff.expo, - (redis.exceptions.TimeoutError, - redis.exceptions.ConnectionError), - max_tries=max_tries) - def type(self, key): - return self.redis.type(key) - - @backoff.on_exception(backoff.expo, - (redis.exceptions.TimeoutError, - redis.exceptions.ConnectionError), - max_tries=max_tries) - def llen(self, key): - return self.redis.llen(key) - - -class CwBotoWrapper(object): - def __init__(self): - self.client = boto3.client('cloudwatch') - - @backoff.on_exception(backoff.expo, - (botocore.exceptions.ClientError), - max_tries=max_tries) - def list_metrics(self, *args, **kwargs): - return self.client.list_metrics(*args, **kwargs) - - @backoff.on_exception(backoff.expo, - (botocore.exceptions.ClientError), - max_tries=max_tries) - def put_metric_data(self, *args, **kwargs): - return self.client.put_metric_data(*args, **kwargs) - - @backoff.on_exception(backoff.expo, - (botocore.exceptions.ClientError), - max_tries=max_tries) - def describe_alarms_for_metric(self, *args, **kwargs): - return self.client.describe_alarms_for_metric(*args, **kwargs) - - @backoff.on_exception(backoff.expo, - (botocore.exceptions.ClientError), - max_tries=max_tries) - def put_metric_alarm(self, *args, **kwargs): - return self.client.put_metric_alarm(*args, **kwargs) - - -@click.command() -@click.option('--host', '-h', default='localhost', - help='Hostname of redis server') -@click.option('--port', '-p', default=6379, help='Port of redis server') -@click.option('--environment', '-e', required=True) -@click.option('--deploy', '-d', required=True, - help="Deployment (i.e. 
edx or edge)") -@click.option('--max-metrics', default=20, - help='Maximum number of CloudWatch metrics to publish') -@click.option('--threshold', default=50, - help='Default maximum queue length before alarm notification is' - + ' sent') -@click.option('--queue-threshold', type=(str, int), multiple=True, - help='Threshold per queue in format --queue-threshold' - + ' {queue_name} {threshold}. May be used multiple times') -@click.option('--sns-arn', '-s', help='ARN for SNS alert topic', required=True) -def check_queues(host, port, environment, deploy, max_metrics, threshold, - queue_threshold, sns_arn): - - thresholds = dict(queue_threshold) - - timeout = 1 - namespace = "celery/{}-{}".format(environment, deploy) - redis_client = RedisWrapper(host=host, port=port, socket_timeout=timeout, - socket_connect_timeout=timeout) - cloudwatch = CwBotoWrapper() - metric_name = 'queue_length' - dimension = 'queue' - response = cloudwatch.list_metrics(Namespace=namespace, - MetricName=metric_name, - Dimensions=[{'Name': dimension}]) - existing_queues = [] - for m in response["Metrics"]: - existing_queues.extend( - [d['Value'] for d in m["Dimensions"] if d['Name'] == dimension]) - - redis_queues = set([k.decode() for k in redis_client.keys() - if redis_client.type(k) == b'list']) - - all_queues = existing_queues + list( - set(redis_queues).difference(existing_queues) - ) - - for queues in grouper(all_queues, max_metrics): - # grouper can return a bunch of Nones and we want to skip those - queues = [q for q in queues if q is not None] - metric_data = [] - for queue in queues: - metric_data.append({ - 'MetricName': metric_name, - 'Dimensions': [{ - "Name": dimension, - "Value": queue - }], - 'Value': redis_client.llen(queue) - }) - - if len(metric_data) > 0: - cloudwatch.put_metric_data(Namespace=namespace, MetricData=metric_data) - - for queue in queues: - dimensions = [{'Name': dimension, 'Value': queue}] - queue_threshold = threshold - if queue in thresholds: - queue_threshold 
= thresholds[queue] - # Period is in seconds - period = 60 - evaluation_periods = 15 - comparison_operator = "GreaterThanThreshold" - treat_missing_data = "notBreaching" - statistic = "Maximum" - actions = [sns_arn] - alarm_name = "{}-{} {} queue length over threshold".format(environment, - deploy, - queue) - - print('Creating or updating alarm "{}"'.format(alarm_name)) - cloudwatch.put_metric_alarm(AlarmName=alarm_name, - AlarmDescription=alarm_name, - Namespace=namespace, - MetricName=metric_name, - Dimensions=dimensions, - Period=period, - EvaluationPeriods=evaluation_periods, - TreatMissingData=treat_missing_data, - Threshold=queue_threshold, - ComparisonOperator=comparison_operator, - Statistic=statistic, - InsufficientDataActions=actions, - OKActions=actions, - AlarmActions=actions) - -# Stolen right from the itertools recipes -# https://docs.python.org/3/library/itertools.html#itertools-recipes -def grouper(iterable, n, fillvalue=None): - "Collect data into fixed-length chunks or blocks" - # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx" - args = [iter(iterable)] * n - return zip_longest(*args, fillvalue=fillvalue) - -if __name__ == '__main__': - check_queues() diff --git a/util/jenkins/check-ses-limits.py b/util/jenkins/check-ses-limits.py index ab16d49f42f..838c63afbe2 100755 --- a/util/jenkins/check-ses-limits.py +++ b/util/jenkins/check-ses-limits.py @@ -2,6 +2,8 @@ # This script is used by the monioring/check-seslimits Jenkins job +from __future__ import absolute_import +from __future__ import print_function import boto3 import argparse import sys @@ -29,7 +31,7 @@ def __call__(self, parser, namespace, values, option_string=None): if args.warning and args.warning >= args.critical: warn_str = "Warning threshold ({})".format(args.warning) crit_str = "Critical threshold ({})".format(args.critical) - print("ERROR: {} >= {}".format(warn_str, crit_str)) + print(("ERROR: {} >= {}".format(warn_str, crit_str))) sys.exit(1) exit_code = 0 @@ -49,8 +51,8 @@ def 
__call__(self, parser, namespace, values, option_string=None): level = "WARNING" if level: - print("{} {}/{} ({}%) - {}".format(region, current, limit, percent, - level)) + print(("{} {}/{} ({}%) - {}".format(region, current, limit, percent, + level))) exit_code += 1 sys.exit(exit_code) diff --git a/util/jenkins/check_celery_progress/check_celery_progress.py b/util/jenkins/check_celery_progress/check_celery_progress.py new file mode 100644 index 00000000000..dd85a057a41 --- /dev/null +++ b/util/jenkins/check_celery_progress/check_celery_progress.py @@ -0,0 +1,526 @@ +from __future__ import absolute_import +from __future__ import print_function +import sys +import pickle +import json +import datetime +import base64 +import zlib +import redis +import click +import backoff +import boto3 +import botocore +from itertools import zip_longest +from celery import Celery +from opsgenie.swagger_client import AlertApi +from opsgenie.swagger_client import configuration +from opsgenie.swagger_client.models import CreateAlertRequest, CloseAlertRequest +from opsgenie.swagger_client.rest import ApiException +from textwrap import dedent + + +MAX_TRIES = 5 +QUEUE_AGE_HASH_NAME = "queue_age_monitoring" +DATE_FORMAT = '%Y-%m-%d %H:%M:%S.%f' + + +class RedisWrapper(object): + def __init__(self, dev_test_mode=None, *args, **kwargs): + assert isinstance(dev_test_mode, bool) + self.dev_test_mode = dev_test_mode + self.redis = redis.StrictRedis(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def keys(self): + return list(self.redis.keys()) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def type(self, key): + return self.redis.type(key) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def llen(self, key): + return 
self.redis.llen(key) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def lindex(self, key, index): + return self.redis.lindex(key, index) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def hgetall(self, key): + return self.redis.hgetall(key) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def delete(self, key): + if self.dev_test_mode: + print(("Test Mode: would have run redis.delete({})".format(key))) + else: + return self.redis.delete(key) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def hset(self, *args): + if self.dev_test_mode: + print(("Test Mode: would have run redis.hset({})".format(args))) + else: + return self.redis.hset(*args) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def hmset(self, *args): + if self.dev_test_mode: + print(("Test Mode: would have run redis.hmset({})".format(args))) + else: + return self.redis.hmset(*args) + + +class CwBotoWrapper(object): + def __init__(self, dev_test_mode=None): + assert isinstance(dev_test_mode, bool) + self.dev_test_mode = dev_test_mode + self.client = boto3.client('cloudwatch') + + @backoff.on_exception(backoff.expo, + (botocore.exceptions.ClientError), + max_tries=MAX_TRIES) + def put_metric_data(self, *args, **kwargs): + if self.dev_test_mode: + print(("Test Mode: would have run put_metric_data({},{})".format(args, kwargs))) + else: + return self.client.put_metric_data(*args, **kwargs) + + +def pretty_json(obj): + return json.dumps(obj, indent=4, sort_keys=True) + + +def pretty_state(state): + output = {} + for queue_name, queue_state in state.items(): + output[queue_name] 
= {} + for key, value in queue_state.items(): + if key == 'first_occurance_time': + value = str_from_datetime(value) + output[queue_name][key] = value + return pretty_json(output) + + +def datetime_from_str(string): + return datetime.datetime.strptime(string, DATE_FORMAT) + + +def str_from_datetime(dt): + return dt.strftime(DATE_FORMAT) + + +def unpack_state(packed_state): + decoded_state = {k.decode("utf-8"): v.decode("utf-8") for k, v in packed_state.items()} + unpacked_state = {} + + for key, value in decoded_state.items(): + decoded_value = json.loads(value) + unpacked_state[key] = { + 'correlation_id': decoded_value['correlation_id'], + 'first_occurance_time': datetime_from_str(decoded_value['first_occurance_time']), + 'alert_created': decoded_value['alert_created'], + } + + return unpacked_state + + +def pack_state(unpacked_state): + packed_state = {} + for queue_name, queue_state in unpacked_state.items(): + dt_str = str_from_datetime(queue_state['first_occurance_time']) + packed_state[queue_name] = json.dumps({ + 'correlation_id': queue_state['correlation_id'], + 'first_occurance_time': dt_str, + 'alert_created': queue_state['alert_created'], + }) + return packed_state + + +def build_new_state(old_state, queue_first_items, current_time): + new_state = {} + for queue_name, first_item in queue_first_items.items(): + # TODO: Handle keys missing in data + correlation_id = first_item['properties']['correlation_id'] + first_occurance_time = current_time + alert_created = False + if queue_name in old_state: + old_correlation_id = old_state[queue_name]['correlation_id'] + alert_created = old_state[queue_name]['alert_created'] + if old_correlation_id == correlation_id: + first_occurance_time = old_state[queue_name]['first_occurance_time'] + + new_state[queue_name] = { + 'correlation_id': correlation_id, + 'first_occurance_time': first_occurance_time, + 'alert_created': alert_created, + } + + return new_state + + +def generate_alert_message(environment, deploy, 
queue_name, threshold): + return str.format("{}-{} {} queue is stale. Stationary for over {}s", environment, deploy, queue_name, threshold) + + +def generate_alert_alias(environment, deploy, queue_name): + return str.format("{}-{} {} stale celery queue", environment, deploy, queue_name) + + +@backoff.on_exception(backoff.expo, + (ApiException), + max_tries=MAX_TRIES) +def create_alert(opsgenie_api_key, environment, deploy, queue_name, threshold, info, dev_test_mode=None): + assert isinstance(dev_test_mode, bool) + + configuration.api_key['Authorization'] = opsgenie_api_key + configuration.api_key_prefix['Authorization'] = 'GenieKey' + + alert_msg = generate_alert_message(environment, deploy, queue_name, threshold) + alias = generate_alert_alias(environment, deploy, queue_name) + + if dev_test_mode: + print(("Test Mode: would have created Alert: {}".format(alias))) + else: + print(("Creating Alert: {}".format(alias))) + response = AlertApi().create_alert(body=CreateAlertRequest(message=alert_msg, alias=alias, description=info)) + print(('request id: {}'.format(response.request_id))) + print(('took: {}'.format(response.took))) + print(('result: {}'.format(response.result))) + + +@backoff.on_exception(backoff.expo, + (ApiException), + max_tries=MAX_TRIES) +def close_alert(opsgenie_api_key, environment, deploy, queue_name, dev_test_mode=None): + assert isinstance(dev_test_mode, bool) + + configuration.api_key['Authorization'] = opsgenie_api_key + configuration.api_key_prefix['Authorization'] = 'GenieKey' + + alias = generate_alert_alias(environment, deploy, queue_name) + + if dev_test_mode: + print(("Test Mode: would have closed Alert: {}".format(alias))) + else: + print(("Closing Alert: {}".format(alias))) + # Need body=CloseAlertRequest(source="") otherwise OpsGenie API complains that body must be a json object + response = AlertApi().close_alert(identifier=alias, identifier_type='alias', body=CloseAlertRequest(source="")) + print(('request id: 
{}'.format(response.request_id))) + print(('took: {}'.format(response.took))) + print(('result: {}'.format(response.result))) + + +def extract_body(task): + body = base64.b64decode(task['body']) + body_dict = {} + + if ( + 'headers' in task and + 'compression' in task['headers'] and + task['headers']['compression'] == 'application/x-gzip' + ): + body = zlib.decompress(body) + + if task.get('content-type') == 'application/json': + body_dict = json.loads(body.decode("utf-8")) + elif task.get('content-type') == 'application/x-python-serialize': + body_dict = {k.decode("utf-8"): v for k, v in pickle.loads(body, encoding='bytes').items()} + return body_dict + + +def generate_info( + queue_name, + correlation_id, + body, + active_tasks, + do_alert, + first_occurance_time, + current_time, + next_task_age, + threshold, + default_threshold, + jenkins_build_url, +): + next_task = "Key missing" + args = "Key missing" + kwargs = "Key missing" + + if 'task' in body: + next_task = body['task'] + + if 'args' in body: + args = body['args'] + + if 'kwargs' in body: + kwargs = body['kwargs'] + + output = str.format( + dedent(""" + ============================================= + queue_name = {} + --------------------------------------------- + do_alert = {} + threshold = {} seconds + default_threshold = {} seconds + jenkins_build_url = {} + current_time = {} + --------------------------------------------- + Next Task: + first_occurance_time = {} + age = {} seconds + correlation_id = {} + task_name = {} + args = {} + kwargs = {} + --------------------------------------------- + active_tasks = {} + ============================================= + """), + queue_name, + do_alert, + threshold, + default_threshold, + jenkins_build_url, + current_time, + first_occurance_time, + next_task_age, + correlation_id, + next_task, + args, + kwargs, + active_tasks, + ) + return output + + +def celery_connection(host, port): + celery_client = " " + try: + broker_url = "redis://" + host + ":" + 
str(port) + celery_client = Celery(broker=broker_url) + except Exception as e: + print(("Exception in connection():", e)) + return celery_client + + +# Functionality added to get list of currently running tasks +# because Redis returns only the next tasks in the list +def get_active_tasks(celery_control, queue_workers, queue_name): + active_tasks = dict() + redacted_active_tasks = dict() + if queue_name in queue_workers: + workers = queue_workers[queue_name] + try: + for worker, data in celery_control.inspect(workers).active().items(): + for task in data: + active_tasks.setdefault( + task["hostname"], []).append([ + 'task: {}'.format(task.get("name")), + 'args: {}'.format(task.get("args")), + 'kwargs: {}'.format(task.get("kwargs")), + ]) + redacted_active_tasks.setdefault( + task["hostname"], []).append([ + 'task: {}'.format(task.get("name")), + 'args: REDACTED', + 'kwargs: REDACTED', + ]) + except Exception as e: + print(("Exception in get_active_tasks():", e)) + return (pretty_json(active_tasks), pretty_json(redacted_active_tasks)) + + +@click.command() +@click.option('--host', '-h', default='localhost', + help='Hostname of redis server', required=True) +@click.option('--port', '-p', default=6379, help='Port of redis server') +@click.option('--environment', '-e', required=True) +@click.option('--deploy', '-d', required=True, + help="Deployment (i.e. edx or edge)") +@click.option('--default-threshold', default=300, + help='Default queue maximum item age in seconds') +@click.option('--queue-threshold', type=(str, int), multiple=True, + help='Per queue maximum item age (seconds) in format --queue-threshold' + + ' {queue_name} {threshold}. 
May be used multiple times.') +@click.option('--opsgenie-api-key', '-k', envvar='OPSGENIE_API_KEY', required=True) +@click.option('--jenkins-build-url', '-j', envvar='BUILD_URL', required=False) +@click.option('--max-metrics', default=20, + help='Maximum number of CloudWatch metrics to publish') +@click.option('--dev-test-mode', is_flag=True, help='Enable dev (no-op) mode') +def check_queues(host, port, environment, deploy, default_threshold, queue_threshold, opsgenie_api_key, + jenkins_build_url, max_metrics, dev_test_mode): + ret_val = 0 + thresholds = dict(queue_threshold) + print(("Default Threshold (seconds): {}".format(default_threshold))) + print(("Per Queue Thresholds (seconds):\n{}".format(pretty_json(thresholds)))) + + timeout = 1 + redis_client = RedisWrapper(host=host, port=port, socket_timeout=timeout, + socket_connect_timeout=timeout, dev_test_mode=dev_test_mode) + celery_control = celery_connection(host, port).control + cloudwatch = CwBotoWrapper(dev_test_mode=dev_test_mode) + + namespace = "celery/{}-{}".format(environment, deploy) + metric_name = 'next_task_age' + dimension = 'queue' + next_task_age_metric_data = [] + + queue_names = set([k.decode() for k in redis_client.keys() + if (redis_client.type(k) == b'list' and + not k.decode().endswith(".pidbox") and + not k.decode().startswith("_kombu"))]) + queue_age_hash = redis_client.hgetall(QUEUE_AGE_HASH_NAME) + + # key: queue name, value: list of worker nodes for each queue + queue_workers = {} + try: + for worker, data in celery_control.inspect().active_queues().items(): + for queue in data: + queue_workers.setdefault(queue['name'], []).append(worker) + except Exception as e: + print(("Exception while getting queue to worker mappings:", e)) + + old_state = unpack_state(queue_age_hash) + # Temp debugging + print(("DEBUG: old_state\n{}\n".format(pretty_state(old_state)))) + queue_first_items = {} + current_time = datetime.datetime.now() + + for queue_name in queue_names: + # Use -1 to get end of 
queue, running redis monitor shows that celery + # uses BRPOP to pull items off the right end of the queue, so that's + # what we should be monitoring + queue_first_item = redis_client.lindex(queue_name, -1) + # Check that queue_first_item is not None which is the case if the queue is empty + if queue_first_item is not None: + queue_first_items[queue_name] = json.loads(queue_first_item.decode("utf-8")) + + new_state = build_new_state(old_state, queue_first_items, current_time) + + # Temp debugging + print(("DEBUG: new_state from new_state() function\n{}\n".format(pretty_state(new_state)))) + for queue_name, first_item in queue_first_items.items(): + redacted_body = "" + threshold = default_threshold + if queue_name in thresholds: + threshold = thresholds[queue_name] + + correlation_id = new_state[queue_name]['correlation_id'] + first_occurance_time = new_state[queue_name]['first_occurance_time'] + body = {} + try: + body = extract_body(first_item) + except Exception as error: + print(("ERROR: Unable to extract task body in queue {}, exception {}".format(queue_name, error))) + ret_val = 1 + redacted_body = {'task': body.get('task'), 'args': 'REDACTED', 'kwargs': 'REDACTED'} + active_tasks, redacted_active_tasks = get_active_tasks(celery_control, queue_workers, queue_name) + next_task_age = (current_time - first_occurance_time).total_seconds() + do_alert = next_task_age > threshold + + next_task_age_metric_data.append({ + 'MetricName': metric_name, + 'Dimensions': [{ + "Name": dimension, + "Value": queue_name + }], + 'Value': next_task_age, + 'Unit': 'Seconds', + }) + + info = generate_info( + queue_name, + correlation_id, + body, + active_tasks, + do_alert, + first_occurance_time, + current_time, + next_task_age, + threshold, + default_threshold, + jenkins_build_url, + ) + redacted_info = generate_info( + queue_name, + correlation_id, + redacted_body, + redacted_active_tasks, + do_alert, + first_occurance_time, + current_time, + next_task_age, + threshold, + 
 default_threshold, + jenkins_build_url, + ) + print(info) + if not new_state[queue_name]['alert_created'] and do_alert: + create_alert(opsgenie_api_key, environment, deploy, queue_name, threshold, redacted_info, + dev_test_mode=dev_test_mode) + new_state[queue_name]['alert_created'] = True + elif new_state[queue_name]['alert_created'] and not do_alert: + close_alert(opsgenie_api_key, environment, deploy, queue_name, dev_test_mode=dev_test_mode) + new_state[queue_name]['alert_created'] = False + + for queue_name in set(old_state.keys()) - set(new_state.keys()): + print(("DEBUG: Checking cleared queue {}".format(queue_name))) + if old_state[queue_name]['alert_created']: + close_alert(opsgenie_api_key, environment, deploy, queue_name, dev_test_mode=dev_test_mode) + + redis_client.delete(QUEUE_AGE_HASH_NAME) + if new_state: + redis_client.hmset(QUEUE_AGE_HASH_NAME, pack_state(new_state)) + # Temp Debugging + print(("DEBUG: new_state pushed to redis\n{}\n".format(pretty_state(new_state)))) + + # Push next_task_age data to cloudwatch for tracking + if len(next_task_age_metric_data) > 0: + for metric_data_grouped in grouper(next_task_age_metric_data, max_metrics): + print(("metric_data_grouped {}".format(metric_data_grouped))) + cloudwatch.put_metric_data(Namespace=namespace, MetricData=metric_data_grouped) + + sys.exit(ret_val) + + +# Stolen right from the itertools recipes +# https://docs.python.org/3/library/itertools.html#itertools-recipes +def grouper(iterable, n, fillvalue=None): + "Collect data into fixed-length chunks or blocks" + # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx + args = [iter(iterable)] * n + chunks = zip_longest(*args, fillvalue=fillvalue) + # Remove Nones in function + for chunk in chunks: + yield [v for v in chunk if v is not None] + + +if __name__ == '__main__': + check_queues() diff --git a/util/jenkins/check_celery_progress/print_queue.py b/util/jenkins/check_celery_progress/print_queue.py new file mode 100644 index 
00000000000..7e495fd4833 --- /dev/null +++ b/util/jenkins/check_celery_progress/print_queue.py @@ -0,0 +1,232 @@ +from __future__ import absolute_import +from __future__ import print_function +import sys +import pickle +import json +import datetime +import base64 +import zlib +import redis +import click +import backoff +from celery import Celery +from textwrap import dedent +from pprint import pprint +from six.moves import range + + +MAX_TRIES = 5 +QUEUE_AGE_HASH_NAME = "queue_age_monitoring" +DATE_FORMAT = '%Y-%m-%d %H:%M:%S.%f' + + +class RedisWrapper(object): + def __init__(self, *args, **kwargs): + self.redis = redis.StrictRedis(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def keys(self): + return list(self.redis.keys()) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def type(self, key): + return self.redis.type(key) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def llen(self, key): + return self.redis.llen(key) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def lindex(self, key, index): + return self.redis.lindex(key, index) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def hgetall(self, key): + return self.redis.hgetall(key) + + +def pretty_json(obj): + return json.dumps(obj, indent=4, sort_keys=True) + + +def unpack_state(packed_state): + decoded_state = {k.decode("utf-8"): v.decode("utf-8") for k, v in packed_state.items()} + unpacked_state = {} + + for key, value in decoded_state.items(): + decoded_value = json.loads(value) + unpacked_state[key] = { + 'correlation_id': 
decoded_value['correlation_id'], + 'first_occurance_time': datetime_from_str(decoded_value['first_occurance_time']), + 'alert_created': decoded_value['alert_created'], + } + + return unpacked_state + + +def extract_body(task): + body = base64.b64decode(task['body']) + body_dict = {} + + if 'headers' in task and 'compression' in task['headers'] and task['headers']['compression'] == 'application/x-gzip': + body = zlib.decompress(body) + + if task.get('content-type') == 'application/json': + body_dict = json.loads(body.decode("utf-8")) + elif task.get('content-type') == 'application/x-python-serialize': + body_dict = {k.decode("utf-8"): v for k, v in pickle.loads(body, encoding='bytes').items()} + return body_dict + + +def generate_info( + queue_name, + correlation_id, + body, + active_tasks, +): + next_task = "Key missing" + args = "Key missing" + kwargs = "Key missing" + + if 'task' in body: + next_task = body['task'] + + if 'args' in body: + args = body['args'] + + if 'kwargs' in body: + kwargs = body['kwargs'] + + output = str.format( + dedent(""" + ============================================= + queue_name = {} + correlation_id = {} + --------------------------------------------- + active_tasks = {} + --------------------------------------------- + next_task = {} + args = {} + kwargs = {} + ============================================= + """), + queue_name, + correlation_id, + active_tasks, + next_task, + args, + kwargs, + ) + return output + + +def celery_connection(host, port): + celery_client = " " + try: + broker_url = "redis://" + host + ":" + str(port) + celery_client = Celery(broker=broker_url) + except Exception as e: + print(("Exception in connection():", e)) + return celery_client + + +# Functionality added to get list of currently running tasks +# because Redis returns only the next tasks in the list +def get_active_tasks(celery_control, queue_workers, queue_name): + active_tasks = dict() + redacted_active_tasks = dict() + if queue_name in 
queue_workers: + workers = queue_workers[queue_name] + try: + for worker, data in celery_control.inspect(workers).active().items(): + for task in data: + active_tasks.setdefault( + task["hostname"], []).append([ + 'task: {}'.format(task.get("name")), + 'args: {}'.format(task.get("args")), + 'kwargs: {}'.format(task.get("kwargs")), + ]) + redacted_active_tasks.setdefault( + task["hostname"], []).append([ + 'task: {}'.format(task.get("name")), + 'args: REDACTED', + 'kwargs: REDACTED', + ]) + except Exception as e: + print(("Exception in get_active_tasks():", e)) + return (pretty_json(active_tasks), pretty_json(redacted_active_tasks)) + + +@click.command() +@click.option('--host', '-h', default='localhost', + help='Hostname of redis server', required=True) +@click.option('--port', '-p', default=6379, help='Port of redis server') +@click.option('--queue', '-q', required=True) +@click.option('--items', '-i', default=1, help='Number of items to print') +@click.option('--body/--no-body', default=False, help='Print full body of tasks') +def check_queues(host, port, queue, items, body): + queue_name = queue + ret_val = 0 + + timeout = 1 + redis_client = RedisWrapper(host=host, port=port, socket_timeout=timeout, + socket_connect_timeout=timeout) + celery_control = celery_connection(host, port).control + + # key: queue name, value: list of worker nodes for each queue + queue_workers = {} + try: + for worker, data in celery_control.inspect().active_queues().items(): + for queue in data: + queue_workers.setdefault(queue['name'], []).append(worker) + except Exception as e: + print(("Exception while getting queue to worker mappings:", e)) + + for count in range(items): + print(("Count: {}".format(count))) + queue_first_item = redis_client.lindex(queue_name, count) + # Check that queue_first_item is not None which is the case if the queue is empty + if queue_first_item is not None: + queue_first_item_decoded = json.loads(queue_first_item.decode("utf-8")) + + correlation_id = 
queue_first_item_decoded['properties']['correlation_id'] + + extracted_body = {} + try: + extracted_body = extract_body(queue_first_item_decoded) + except Exception as error: + print(("ERROR: Unable to extract task body in queue {}, exception {}".format(queue_name, error))) + ret_val = 1 + active_tasks, redacted_active_tasks = get_active_tasks(celery_control, queue_workers, queue_name) + + info = generate_info( + queue_name, + correlation_id, + extracted_body, + active_tasks, + ) + print(info) + if body: + print("BODY") + pprint(extracted_body) + + sys.exit(ret_val) + + +if __name__ == '__main__': + check_queues() diff --git a/util/jenkins/check_celery_progress/requirements.txt b/util/jenkins/check_celery_progress/requirements.txt new file mode 100644 index 00000000000..5b544f83d8d --- /dev/null +++ b/util/jenkins/check_celery_progress/requirements.txt @@ -0,0 +1,36 @@ +# +# This file is autogenerated by pip-compile +# To update, run: +# +# make upgrade +# +amqp==1.4.9 # via kombu +anyjson==0.3.3 # via kombu +awscli==1.14.32 # via -r requirements/celery_progress.in +backoff==1.4.3 # via -r requirements/celery_progress.in +billiard==3.3.0.23 # via celery +boto3==1.5.4 # via -r requirements/celery_progress.in +botocore==1.8.36 # via awscli, boto3, s3transfer +celery==3.1.25 # via -r requirements/celery_progress.in +certifi==2020.4.5.1 # via opsgenie-sdk, requests +chardet==3.0.4 # via requests +click==6.7 # via -r requirements/celery_progress.in +colorama==0.3.7 # via awscli +docutils==0.16 # via awscli, botocore +idna==2.9 # via requests +jmespath==0.9.5 # via boto3, botocore +kombu==3.0.37 # via celery +opsgenie-sdk==0.3.1 # via -r requirements/celery_progress.in +pyasn1==0.4.8 # via rsa +python-dateutil==2.8.1 # via botocore, opsgenie-sdk +pytz==2020.1 # via celery, opsgenie-sdk +pyyaml==3.12 # via awscli +redis==2.10.6 # via -r requirements/celery_progress.in +requests==2.23.0 # via opsgenie-sdk +rsa==3.4.2 # via awscli +s3transfer==0.1.13 # via awscli, boto3 
+six==1.14.0 # via opsgenie-sdk, python-dateutil +urllib3==1.25.9 # via opsgenie-sdk, requests + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/util/jenkins/check_celery_progress/test_check_celery_progress.py b/util/jenkins/check_celery_progress/test_check_celery_progress.py new file mode 100644 index 00000000000..25f6f4cb922 --- /dev/null +++ b/util/jenkins/check_celery_progress/test_check_celery_progress.py @@ -0,0 +1,164 @@ +from __future__ import absolute_import +import unittest +import datetime +from datetime import timedelta +from check_celery_progress import build_new_state, datetime_from_str, should_create_alert, pack_state, unpack_state + +class TestCheckCeleryQueues(unittest.TestCase): + + queue_first_items_0 = { + "edx.lms.core.grades_policy_change": { + "body": "", + "headers": { + "compression": "application/x-gzip" + }, + "content-type": "application/json", + "properties": { + "body_encoding": "base64", + "correlation_id": "8a661b24-cf3a-49f6-ba72-824d20d1cc02", + "reply_to": "9d88dd87-c55f-3d95-a725-176db14a16dd", + "delivery_info": { + "priority": 0, + "routing_key": "edx.lms.core.grades_policy_change", + "exchange": "edx.lms.core" + }, + "delivery_mode": 2, + "delivery_tag": "0efb9ecc-2914-4a3c-98fb-358a67d678d8" + }, + "content-encoding": "utf-8" + }, + "edx.lms.core.default": { + "body": "", + "headers": { + "compression": "application/x-gzip" + }, + "content-type": "application/json", + "properties": { + "body_encoding": "base64", + "correlation_id": "27d76be4-408b-4f5b-a132-5ad043465e90", + "reply_to": "2e6506c2-46dd-3dd7-9be0-a03e0e5c2f7f", + "delivery_info": { + "priority": 0, + "routing_key": "edx.lms.core.default", + "exchange": "edx.lms.core" + }, + "delivery_mode": 2, + "delivery_tag": "2627a9a2-1941-4890-8fe3-c477c62b707d" + }, + "content-encoding": "utf-8" + } + } + + + queue_first_items_1 = { + "edx.lms.core.grades_policy_change": { + "body": "", + "headers": { + "compression": 
"application/x-gzip" + }, + "content-type": "application/json", + "properties": { + "body_encoding": "base64", + "correlation_id": "8a661b24-cf3a-49f6-ba72-824d20d1cc02", + "reply_to": "9d88dd87-c55f-3d95-a725-176db14a16dd", + "delivery_info": { + "priority": 0, + "routing_key": "edx.lms.core.grades_policy_change", + "exchange": "edx.lms.core" + }, + "delivery_mode": 2, + "delivery_tag": "0efb9ecc-2914-4a3c-98fb-358a67d678d8" + }, + "content-encoding": "utf-8" + }, + "edx.lms.core.default": { + "body": "", + "headers": { + "compression": "application/x-gzip" + }, + "content-type": "application/json", + "properties": { + "body_encoding": "base64", + "correlation_id": "27d76be4-408b-4f5b-a132-c0ffee465e90", + "reply_to": "2e6506c2-46dd-3dd7-9be0-a03e0e5c2f7f", + "delivery_info": { + "priority": 0, + "routing_key": "edx.lms.core.default", + "exchange": "edx.lms.core" + }, + "delivery_mode": 2, + "delivery_tag": "2627a9a2-1941-4890-8fe3-c477c62b707d" + }, + "content-encoding": "utf-8" + } + } + + time_0 = datetime_from_str("2018-10-04 11:00:51.111367") + time_1_min = datetime_from_str("2018-10-04 11:01:51.111367") + + threshold = 5 * 60 + + def test_equal_output_if_queues_stuck(self): + state_0 = build_new_state({}, self.queue_first_items_0, self.time_0) + state_1 = build_new_state(state_0, self.queue_first_items_0, self.time_1_min) + self.assertEqual(state_0, state_1) + + def test_build_new_state_missing_alert_created(self): + state_0 = build_new_state({}, self.queue_first_items_0, self.time_0) + state_0['edx.lms.core.default'].pop('alert_created') + state_1 = build_new_state(state_0, self.queue_first_items_0, self.time_1_min) + self.assertFalse(state_1['edx.lms.core.default']['alert_created']) + + def test_build_new_state_alert_created(self): + state_0 = build_new_state({}, self.queue_first_items_0, self.time_0) + state_1 = build_new_state(state_0, self.queue_first_items_0, self.time_1_min) + 
self.assertFalse(state_0['edx.lms.core.grades_policy_change']['alert_created']) + self.assertFalse(state_0['edx.lms.core.default']['alert_created']) + self.assertFalse(state_1['edx.lms.core.grades_policy_change']['alert_created']) + self.assertFalse(state_1['edx.lms.core.default']['alert_created']) + + def test_build_new_state_alert_created_preserved(self): + state_0 = build_new_state({}, self.queue_first_items_0, self.time_0) + state_0['edx.lms.core.default']['alert_created'] = True + state_1 = build_new_state(state_0, self.queue_first_items_0, self.time_1_min) + self.assertTrue(state_1['edx.lms.core.default']['alert_created']) + + def test_output_1_queue_changed(self): + state_0 = build_new_state({}, self.queue_first_items_0, self.time_0) + state_1 = build_new_state(state_0, self.queue_first_items_1, self.time_1_min) + self.assertEqual(state_0['edx.lms.core.grades_policy_change'], state_1['edx.lms.core.grades_policy_change']) + self.assertEqual(state_1['edx.lms.core.default']['first_occurance_time'], self.time_1_min) + self.assertEqual(state_1['edx.lms.core.default']['correlation_id'], "27d76be4-408b-4f5b-a132-c0ffee465e90") + self.assertFalse(state_1['edx.lms.core.default']['alert_created']) + + def test_should_create_alert_0_delta(self): + first_occurance_time = self.time_0 + result = should_create_alert(first_occurance_time, self.time_0, self.threshold) + self.assertEqual(False, result) + + def test_should_create_alert_under_threshold(self): + first_occurance_time = self.time_0 - timedelta(seconds=self.threshold-60) + result = should_create_alert(first_occurance_time, self.time_0, self.threshold) + self.assertEqual(False, result) + + def test_should_create_alert_over_threshold(self): + first_occurance_time = self.time_0 - timedelta(seconds=self.threshold+60) + result = should_create_alert(first_occurance_time, self.time_0, self.threshold) + self.assertEqual(True, result) + + def test_should_create_alert_negative_delta(self): + first_occurance_time = 
self.time_0 + timedelta(seconds=self.threshold+60) + result = should_create_alert(first_occurance_time, self.time_0, self.threshold) + self.assertEqual(False, result) + + def test_pack_state(self): + # Round trip state to make sure all fields are preserved + state = build_new_state({}, self.queue_first_items_0, self.time_0) + packed_state = pack_state(state) + encoded_packed_state = {k.encode("utf-8"): v.encode("utf-8") for k, v in packed_state.items()} + unpacked_state = unpack_state(encoded_packed_state) + self.assertEqual(state, unpacked_state) + + +if __name__ == '__main__': + unittest.main() diff --git a/util/jenkins/check_table_size/check_table_size.py b/util/jenkins/check_table_size/check_table_size.py new file mode 100644 index 00000000000..7eca5671003 --- /dev/null +++ b/util/jenkins/check_table_size/check_table_size.py @@ -0,0 +1,154 @@ +from __future__ import absolute_import +from __future__ import print_function +import boto3 +from botocore.exceptions import ClientError +import sys +import backoff +import pymysql +import click + +MAX_TRIES = 5 + + +class EC2BotoWrapper: + def __init__(self): + self.client = boto3.client("ec2") + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_regions(self): + return self.client.describe_regions() + + +class RDSBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("rds", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_db_instances(self): + return self.client.describe_db_instances() + + +def rds_extractor(): + """ + Return list of all RDS instances across all the regions + Returns: + [ + { + 'name': name, + 'Endpoint': Endpoint of RDS + 'Port': Port of RDS + } + ] + """ + client_region = EC2BotoWrapper() + rds_list = [] + try: + regions_list = client_region.describe_regions() + except ClientError as e: + print(("Unable to connect to AWS with error :{}".format(e))) + sys.exit(1) + for region in 
regions_list["Regions"]: + client = RDSBotoWrapper(region_name=region["RegionName"]) + response = client.describe_db_instances() + for instance in response.get('DBInstances'): + # This condition use to skip irrelevant RDS + if "prod" in instance.get("Endpoint").get("Address") or "stage" in instance.get("Endpoint").get("Address"): + temp_dict = {} + temp_dict["name"] = instance["DBInstanceIdentifier"] + temp_dict["Endpoint"] = instance.get("Endpoint").get("Address") + temp_dict["Port"] = instance.get("Port") + rds_list.append(temp_dict) + return rds_list + + +def check_table_growth(rds_list, username, password, threshold, rds_threshold): + """ + Return: + Return list all tables that cross threshold limit + [ + { + "name": "string", + "db": "string", + "table": "string", + "size": "string", + } + ] + """ + try: + table_list = [] + for db in rds_list: + print("Checking table sizes for {}".format(db["Endpoint"])) + rds_host_endpoint = db["Endpoint"] + rds_port = db["Port"] + connection = pymysql.connect(host=rds_host_endpoint, + port=rds_port, user=username, password=password) + # prepare a cursor object using cursor() method + cursor = connection.cursor() + # execute SQL query using execute() method. 
+ cursor.execute(""" + SELECT + table_schema as `Database`, + table_name AS `Table`, + round(((data_length + index_length) / 1024 / 1024), 2) `Size in MB` + FROM information_schema.TABLES + WHERE TABLE_SCHEMA NOT IN ('mysql', 'information_schema', 'performance_schema') + ORDER BY (data_length + index_length) DESC; + """) + + rds_result = cursor.fetchall() + cursor.close() + connection.close() + if db["name"] in rds_threshold: + threshold_limit = rds_threshold[db["name"]] + else: + threshold_limit = threshold + for tables in rds_result: + temp_dict = {} + if tables[2] is not None and tables[2] > float(threshold_limit): + temp_dict["rds"] = db["name"] + temp_dict["db"] = tables[0] + temp_dict["table"] = tables[1] + temp_dict["size"] = tables[2] + table_list.append(temp_dict) + return table_list + except Exception as ex: + print(ex) + sys.exit(1) + + +@click.command() +@click.option('--username', envvar='USERNAME', required=True) +@click.option('--password', envvar='PASSWORD', required=True) +@click.option('--threshold', required=True, help='Threshold for tables') +@click.option('--rdsthreshold', type=(str, int), multiple=True, help='Specific RDS threshold') +@click.option('--rdsignore', '-i', multiple=True, help='RDS name tags to not check, can be specified multiple times') +def controller(username, password, threshold, rdsthreshold, rdsignore): + """ + Control execution of all other functions + Arguments: + username (str): + Get this from cli args + + password (str): + Get this from cli args + threshold (str): + Get this from cli args + rdsthreshold (str, int): + Get this from cli args + """ + rds_threshold = dict(rdsthreshold) + rds_list = rds_extractor() + filtered_rds_list = list([x for x in rds_list if x['name'] not in rdsignore]) + table_list = check_table_growth(filtered_rds_list, username, password, threshold, rds_threshold) + if len(table_list) > 0: + format_string = "{:<40}{:<20}{:<50}{}" + print((format_string.format("RDS Name","Database Name", "Table 
Name", "Size"))) + for items in table_list: + print((format_string.format(items["rds"], items["db"], items["table"], str(items["size"]) + " MB"))) + exit(1) + exit(0) + + +if __name__ == '__main__': + controller() + diff --git a/util/jenkins/check_table_size/requirements.txt b/util/jenkins/check_table_size/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/check_table_size/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/cloudflare-hit-rate.py b/util/jenkins/cloudflare-hit-rate.py index 955ca3a4c2f..4bb2c68dd30 100644 --- a/util/jenkins/cloudflare-hit-rate.py +++ b/util/jenkins/cloudflare-hit-rate.py @@ -3,6 +3,8 @@ https://api.cloudflare.com/#zone-analytics-dashboard """ +from __future__ import absolute_import +from __future__ import print_function import requests import argparse import sys @@ -30,7 +32,7 @@ def calcualte_cache_hit_rate(zone_id, auth_key, email, threshold): sys.exit(1) except Exception as error: - print("JSON Error: {} \n Content returned from API call: {}".format(error, res.text)) + print(("JSON Error: {} \n Content returned from API call: {}".format(error, res.text))) diff --git a/util/jenkins/export_dead_locks/export_dead_locks.py b/util/jenkins/export_dead_locks/export_dead_locks.py new file mode 100644 index 00000000000..c5f8c2e5745 --- /dev/null +++ b/util/jenkins/export_dead_locks/export_dead_locks.py @@ -0,0 +1,123 @@ +from __future__ import absolute_import +from __future__ import print_function +import boto3 +from botocore.exceptions import ClientError +import sys +import backoff +import pymysql +import time +import uuid +import click +import re +import splunklib.client as splunk_client + +MAX_TRIES = 5 + + +class EC2BotoWrapper: + def __init__(self): + self.client = boto3.client("ec2") + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_regions(self): + return self.client.describe_regions() + + 
+class RDSBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("rds", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_db_instances(self): + return self.client.describe_db_instances() + + +def rds_extractor(environment): + """ + Return list of all RDS instances across all the regions + Returns: + [ + { + 'name': name, + 'ARN': RDS ARN, + 'Region': Region of RDS + } + ] + """ + client_region = EC2BotoWrapper() + rds_list = [] + try: + regions_list = client_region.describe_regions() + except ClientError as e: + print(("Unable to connect to AWS with error :{}".format(e))) + sys.exit(1) + for region in regions_list["Regions"]: + rds_client = RDSBotoWrapper(region_name=region["RegionName"]) + response = rds_client.describe_db_instances() + for instance in response.get('DBInstances'): + if environment in instance.get("Endpoint").get("Address"): + temp_dict = {} + temp_dict["name"] = instance["DBInstanceIdentifier"] + temp_dict["ARN"] = instance["DBInstanceArn"] + temp_dict["Region"] = region["RegionName"] + temp_dict["Endpoint"] = instance.get("Endpoint").get("Address") + temp_dict["Username"] = instance.get("MasterUsername") + temp_dict["Port"] = instance.get("Port") + rds_list.append(temp_dict) + return rds_list + + +def rds_controller(rds_list, username, password, hostname, splunkusername, splunkpassword, port, indexname): + for item in rds_list: + rds_host_endpoint = item["Endpoint"] + rds_port = item["Port"] + connection = pymysql.connect(host=rds_host_endpoint, port=rds_port, + user=username, password=password) + cursor = connection.cursor() + cursor.execute(""" + SHOW ENGINE INNODB STATUS; + """) + rds_result = cursor.fetchall() + cursor.close() + connection.close() + regex = r"-{4,}\sLATEST DETECTED DEADLOCK\s-{4,}\s((.*)\s)*?-{4,}" + global_str = "" + for row in rds_result: + matches = re.finditer(regex, row[2]) + for matchNum, match in enumerate(matches, start=1): + global_str = 
match.group() + expr = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}") + global_str = re.sub(expr, '', global_str) + #to avoid empty dead locks + if len(global_str) > 0: + service = splunk_client.connect(host=hostname, port=port, username=splunkusername, password=splunkpassword) + myindex = service.indexes[indexname] + # Open a socket + mysocket = myindex.attach(host=rds_host_endpoint, source="INNODB STATUS", sourcetype="RDS") + + # Send events to it + mysocket.send(str.encode(global_str)) + + # Close the socket + mysocket.close() + + +@click.command() +@click.option('--username', envvar='USERNAME', required=True) +@click.option('--password', envvar='PASSWORD', required=True) +@click.option('--environment', required=True, help='Use to identify the environment') +@click.option('--hostname', required=True, help='Use to identify the splunk hostname') +@click.option('--splunkusername', envvar='SPLUNKUSERNAME', required=True) +@click.option('--splunkpassword', envvar='SPLUNKPASSWORD', required=True) +@click.option('--port', required=True, help='Use to identify the splunk port') +@click.option('--indexname', required=True, help='Use to identify the splunk index name') +@click.option('--rdsignore', '-i', multiple=True, help='RDS name tags to not check, can be specified multiple times') +def main(username, password, environment, hostname, splunkusername, splunkpassword, port, indexname, rdsignore): + rds_list = rds_extractor(environment) + filtered_rds_list = list([x for x in rds_list if x['name'] not in rdsignore]) + rds_controller(filtered_rds_list, username, password, hostname, splunkusername, splunkpassword, port, indexname) + + +if __name__ == '__main__': + main() + diff --git a/util/jenkins/export_dead_locks/requirements.txt b/util/jenkins/export_dead_locks/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/export_dead_locks/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git 
a/util/jenkins/export_slow_logs/export_slow_query_logs.py b/util/jenkins/export_slow_logs/export_slow_query_logs.py new file mode 100644 index 00000000000..c5f1df1bb52 --- /dev/null +++ b/util/jenkins/export_slow_logs/export_slow_query_logs.py @@ -0,0 +1,149 @@ +from __future__ import absolute_import +from __future__ import print_function +import boto3 +from botocore.exceptions import ClientError +import sys +import backoff +import pymysql +import time +import uuid +import click + +MAX_TRIES = 5 + + +class CWBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("logs", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def put_log_events(self, **kwargs): + return self.client.put_log_events(**kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def create_log_stream(self, **kwargs): + return self.client.create_log_stream(**kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def create_log_group(self, **kwargs): + return self.client.create_log_group(**kwargs) + + +class EC2BotoWrapper: + def __init__(self): + self.client = boto3.client("ec2") + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_regions(self): + return self.client.describe_regions() + + +class RDSBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("rds", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_db_instances(self): + return self.client.describe_db_instances() + + +def rds_extractor(environment): + """ + Return list of all RDS instances across all the regions + Returns: + [ + { + 'name': name, + 'ARN': RDS ARN, + 'Region': Region of RDS + } + ] + """ + client_region = EC2BotoWrapper() + rds_list = [] + try: + regions_list = client_region.describe_regions() + except ClientError as e: + print(("Unable to connect to AWS with error :{}".format(e))) + sys.exit(1) + for region in 
regions_list["Regions"]: + client = RDSBotoWrapper(region_name=region["RegionName"]) + response = client.describe_db_instances() + for instance in response.get('DBInstances'): + if environment in instance.get("Endpoint").get("Address"): + temp_dict = {} + temp_dict["name"] = instance["DBInstanceIdentifier"] + temp_dict["ARN"] = instance["DBInstanceArn"] + temp_dict["Region"] = region["RegionName"] + temp_dict["Endpoint"] = instance.get("Endpoint").get("Address") + temp_dict["Username"] = instance.get("MasterUsername") + temp_dict["Port"] = instance.get("Port") + rds_list.append(temp_dict) + return rds_list + + +def rds_controller(rds_list, username, password): + for item in rds_list: + rds_host_endpoint = item["Endpoint"] + rds_port = item["Port"] + connection = pymysql.connect(host=rds_host_endpoint, port=rds_port, + user=username, password=password) + cursor = connection.cursor() + cursor.execute(""" + SELECT * + FROM mysql.slow_log + WHERE start_time > DATE_ADD(NOW(), INTERVAL -1 HOUR); + """) + rds_result = cursor.fetchall() + cursor.close() + connection.close() + if len(rds_result) > 0: + cw_logs = [] + sequencetoken = None + client = CWBotoWrapper() + loggroupname= "/slowlogs/" + rds_host_endpoint + try: + client.create_log_group(logGroupName=loggroupname) + print(('Created CloudWatch log group named "%s"', loggroupname)) + except ClientError: + print(('CloudWatch log group named "%s" already exists', loggroupname)) + LOG_STREAM = time.strftime('%Y-%m-%d') + "/[$LATEST]" + uuid.uuid4().hex + client.create_log_stream(logGroupName=loggroupname, logStreamName=LOG_STREAM) + for tables in rds_result: + temp = {} + temp["timestamp"] = int(tables[0].strftime("%s")) * 1000 + temp["message"] = "User@Host: " + str(tables[1]) + \ + "Query_time: " + str(tables[2]) + " Lock_time: " + str(tables[3]) + \ + " Rows_sent: " + str(tables[4]) + " Rows_examined: " + str(tables[5]) +\ + "Slow Query: " + str(tables[10]) + cw_logs.append(temp) + if sequencetoken == None: + response 
= client.put_log_events( + logGroupName=loggroupname, + logStreamName=LOG_STREAM, + logEvents=cw_logs + ) + else: + response = client.put_log_events( + logGroupName=loggroupname, + logStreamName=LOG_STREAM, + logEvents=cw_logs, + sequenceToken=sequencetoken + ) + sequencetoken = response["nextSequenceToken"] + + +@click.command() +@click.option('--username', envvar='USERNAME', required=True) +@click.option('--password', envvar='PASSWORD', required=True) +@click.option('--environment', required=True, help='Use to identify the environment') +@click.option('--rdsignore', '-i', multiple=True, help='RDS name tags to not check, can be specified multiple times') +def main(username, password, environment, rdsignore): + rds_list = rds_extractor(environment) + filtered_rds_list = list([x for x in rds_list if x['name'] not in rdsignore]) + rds_controller(filtered_rds_list, username, password) + + +if __name__ == '__main__': + main() + diff --git a/util/jenkins/export_slow_logs/requirements.txt b/util/jenkins/export_slow_logs/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/export_slow_logs/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/list_mysql_process/list_mysql_process.py b/util/jenkins/list_mysql_process/list_mysql_process.py new file mode 100644 index 00000000000..343acb9ce04 --- /dev/null +++ b/util/jenkins/list_mysql_process/list_mysql_process.py @@ -0,0 +1,144 @@ +from __future__ import absolute_import +from __future__ import print_function +import boto3 +from botocore.exceptions import ClientError +import sys +import backoff +import pymysql +import click + +MAX_TRIES = 5 + + +class EC2BotoWrapper: + def __init__(self): + self.client = boto3.client("ec2") + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_regions(self): + return self.client.describe_regions() + + +class RDSBotoWrapper: + def __init__(self, **kwargs): + 
self.client = boto3.client("rds", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_db_instances(self): + return self.client.describe_db_instances() + + +def rds_extractor(environment): + """ + Return list of all RDS instances across all the regions + Returns: + [ + { + 'name': name, + 'Endpoint': Endpoint of RDS + 'Port': Port of RDS + } + ] + """ + client_region = EC2BotoWrapper() + rds_list = [] + try: + regions_list = client_region.describe_regions() + except ClientError as e: + print(("Unable to connect to AWS with error :{}".format(e))) + sys.exit(1) + for region in regions_list["Regions"]: + client = RDSBotoWrapper(region_name=region["RegionName"]) + response = client.describe_db_instances() + for instance in response.get('DBInstances'): + if environment in instance.get("Endpoint").get("Address"): + temp_dict = {} + temp_dict["name"] = instance["DBInstanceIdentifier"] + temp_dict["Endpoint"] = instance.get("Endpoint").get("Address") + temp_dict["Port"] = instance.get("Port") + rds_list.append(temp_dict) + return rds_list + + +def check_queries_running(rds_list, username, password): + """ + Return: + Return list of currently running queries + [ + { + "id": "string", + "user": "string", + "host": "string", + "command": "string", + "time": "integer", + "state": "string", + "info": "string" + } + ] + """ + try: + process_list = [] + for item in rds_list: + rds_host_endpoint = item["Endpoint"] + rds_port = item["Port"] + connection = pymysql.connect(host=rds_host_endpoint, + port=rds_port, user=username, password=password) + # prepare a cursor object using cursor() method + cursor = connection.cursor() + # execute SQL query using execute() method. 
+ cursor.execute(""" + SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST + """) + + rds_result = cursor.fetchall() + cursor.close() + connection.close() + for process in rds_result: + temp_dict = {} + temp_dict["id"] = process[0] + temp_dict["user"] = process[1] + temp_dict["host"] = process[2] + temp_dict["command"] = process[4] + temp_dict["time"] = process[5] + temp_dict["state"] = process[6] + temp_dict["info"] = process[7] + process_list.append(temp_dict) + return process_list + except Exception as ex: + print(ex) + sys.exit(1) + + +@click.command() +@click.option('--username', envvar='USERNAME', required=True) +@click.option('--password', envvar='PASSWORD', required=True) +@click.option('--environment', required=True, help='Use to identify the environment') +@click.option('--rdsignore', '-i', multiple=True, help='RDS name tags to not check, can be specified multiple times') +def controller(username, password, environment, rdsignore): + """ + Control execution of all other functions + Arguments: + username (str): + Get this from cli args + + password (str): + Get this from cli args + + environment (str): + Get this from cli args + """ + rds_list = rds_extractor(environment) + filtered_rds_list = list([x for x in rds_list if x['name'] not in rdsignore]) + process_list = check_queries_running(filtered_rds_list, username, password) + if len(process_list) > 0: + format_string = "{:<20}{:<20}{:<30}{:<20}{:<20}{:<70}{}" + print((format_string.format("Query ID", "User Name", "Host", "Command", "Time Executed", "State", "Info"))) + for items in process_list: + print((format_string.format(items["id"], items["user"], items["host"], items["command"], + str(items["time"]) + " sec", items["state"], items["info"]))) + exit(0) + + +if __name__ == '__main__': + controller() + diff --git a/util/jenkins/list_mysql_process/requirements.txt b/util/jenkins/list_mysql_process/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ 
b/util/jenkins/list_mysql_process/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/missing_alerts_checker/missing_alerts_checker.py b/util/jenkins/missing_alerts_checker/missing_alerts_checker.py new file mode 100644 index 00000000000..dec759040d2 --- /dev/null +++ b/util/jenkins/missing_alerts_checker/missing_alerts_checker.py @@ -0,0 +1,272 @@ +from __future__ import absolute_import +from __future__ import print_function +import boto3 +import requests +import click +from botocore.exceptions import ClientError +import sys +import re + + +class NewRelic: + def __init__(self, new_relic_api_key): + self.url_alert_extractor = "https://api.newrelic.com/v2/alerts_policies.json" + self.headers = {'X-Api-Key': new_relic_api_key} + + def new_relic_policies_extractor(self): + """ + Return: + Return list of all alert policies extracted from New relic + { + "policy": { + "id": int, + "incident_preference": str, + "name": str, + "created_at": int, + "updated_at": int + } + } + """ + response = requests.get(self.url_alert_extractor, headers=self.headers) + if response.status_code != 200: + print("Unable to communicate with New relic.") + sys.exit(1) + try: + alert_policies = response.json() + except ValueError: + print(("Failed to parse response json. 
Got:\n{}".format(response.text))) + sys.exit(1) + return alert_policies + + +class InfraAlerts: + def edc_extractor(self): + """ + Return list of all EC2 instances with EDC's tags across all the regions + Returns: + [ + { + 'name': name, + 'ID': instance.id + } + ] + """ + client_region = boto3.client('ec2') + filter_tags = [ + { + "Name": "tag:environment", + "Values": ["*"] + }, + { + "Name": "tag:deployment", + "Values": ["*"] + }, + { + "Name": "tag:cluster", + "Values": ["*"] + }, + { + 'Name': 'instance-state-name', + 'Values': ['running'] + } + ] + instance_list = [] + try: + regions_list = client_region.describe_regions() + except ClientError as e: + print(("Unable to connect to AWS with error :{}".format(e))) + sys.exit(1) + for region in regions_list['Regions']: + client = boto3.resource('ec2', region_name=region['RegionName']) + response = client.instances.filter(Filters=filter_tags) + for instance in response: + temp_dict = {} + for tag in instance.tags: + if tag['Key'] == "Name": + name = tag['Value'] + temp_dict = { + 'name': name, + 'ID': instance.id + } + break + else: + pass + instance_list.append(temp_dict) + return instance_list + + def missing_alerts_checker(self, instance_list, alert_policies): + """ + Arguments: + instance_list (list): + List of all instances for which we find alerts + alert_policies list(dict): + List of all existing alerts new relic + Return: + Return list of all instances which have no alert in new Relic + [ + { + 'name': name, + 'ID': instance.id + } + ] + """ + result_instance = [] + for instance in instance_list: + if not any(policy["name"] == instance["name"] + "-infrastructure" for policy in alert_policies["policies"]): + result_instance.append(instance) + return result_instance + + +class AppAlerts: + def __init__(self, new_relic_api_key): + self.url_app_extractor = "https://api.newrelic.com/v2/applications.json" + self.headers = {'X-Api-Key': new_relic_api_key} + + def new_relic_app_extractor(self): + """ + Return: + 
Return list all applications in new relic + """ + response = requests.get(self.url_app_extractor, headers=self.headers) + if response.status_code != 200: + print("Unable to communicate with New relic.") + sys.exit(1) + try: + apps_list = response.json() + except ValueError: + print(("Failed to parse response json. Got:\n{}".format(response.text))) + sys.exit(1) + return apps_list["applications"] + + def missing_alerts_checker(self, app_list, alert_policies): + """ + Arguments: + app_list (list): + List of all applications for which we find alerts + alert_policies list(dict): + List of all existing alerts new relic + Return: + Return list of all applications which have no alert in new Relic + """ + result_apps = [] + for apps in app_list: + if not any(policy["name"] == apps["name"] + "-application" for policy in alert_policies["policies"]): + result_apps.append(apps) + return result_apps + + +class BrowserAlerts: + def __init__(self, new_relic_api_key): + self.url_browser_extractor = "https://api.newrelic.com/v2/browser_applications.json" + self.headers = {'X-Api-Key': new_relic_api_key} + + def new_relic_browser_extractor(self): + """ + Return: + Return list all browser applications in new relic + [ + { + "id": "integer", + "name": "string", + "browser_monitoring_key": "string", + "loader_script": "string" + } + ] + """ + response = requests.get(self.url_browser_extractor, headers=self.headers) + if response.status_code != 200: + print("Unable to communicate with New relic.") + sys.exit(1) + try: + browser_list = response.json() + except ValueError: + raise Exception("Failed to parse response json. 
Got:\n{}".format(response.text)) + return browser_list["browser_applications"] + + def missing_alerts_checker(self, browser_list, alert_policies): + """ + Arguments: + browser_list (list): + List of all browser applications for which we find alerts + alert_policies list(dict): + List of all existing alerts new relic + Return: + Return list of all browser applications which have no alert in new Relic + [ + { + "id": "integer", + "name": "string", + "browser_monitoring_key": "string", + "loader_script": "string" + } + ] + """ + result_browser = [] + for browser in browser_list: + if not any(policy["name"] == browser["name"].rstrip() + "-browser" for policy in alert_policies["policies"]): + result_browser.append(browser) + return result_browser + + +@click.command() +@click.option('--new-relic-api-key', required=True, help='API Key to use to speak with NewRelic.') +@click.option('--ignore', '-i', multiple=True, help='App name regex to filter out, can be specified multiple times') +def controller(new_relic_api_key,ignore): + """ + Control execution of all other functions + Arguments: + new_relic_api_key (str): + Get this from cli args + """ + flag = 0 + # Initializing object of classes + infracheck = InfraAlerts() + new_relic_obj = NewRelic(new_relic_api_key) + # Get list of all instances in different regions + instance_list = infracheck.edc_extractor() + # Get list of all alert policies in new relic + alert_policies = new_relic_obj.new_relic_policies_extractor() + # Get list of all instances without alerts + missing_alerts_list = infracheck.missing_alerts_checker(instance_list, alert_policies) + filtered_missing_alerts_list = list([x for x in missing_alerts_list if not any(re.search(r, x['name']) for r in ignore)]) + format_string = "{:<30}{}" + print((format_string.format("Instance ID", "Instance Name"))) + for instance_wo_alerts in filtered_missing_alerts_list: + print((format_string.format(instance_wo_alerts["ID"], instance_wo_alerts["name"]))) + flag = 1 + + # 
Initializing object of classes + appcheck = AppAlerts(new_relic_api_key) + new_relic_obj = NewRelic(new_relic_api_key) + # Get list of all applications from new relic + apps_list = appcheck.new_relic_app_extractor() + # Get list of all applications without alerts + missing_alerts_list_app = appcheck.missing_alerts_checker(apps_list, alert_policies) + filtered_missing_alerts_list_app = list([x for x in missing_alerts_list_app if not any(re.search(r, x['name']) for r in ignore)]) + format_string = "{:<20}{}" + print("") + print((format_string.format("Application ID", "Application Name"))) + for instance_wo_alerts in filtered_missing_alerts_list_app: + print((format_string.format(instance_wo_alerts["id"], instance_wo_alerts["name"]))) + flag = 1 + + # Initializing object of classes + browsercheck = BrowserAlerts(new_relic_api_key) + new_relic_obj = NewRelic(new_relic_api_key) + # Get list of all browser applications from new relic + browser_list = browsercheck.new_relic_browser_extractor() + # Get list of all browser applications without alerts + missing_alerts_list_browser = browsercheck.missing_alerts_checker(browser_list, alert_policies) + filtered_missing_alerts_list_browser = list([x for x in missing_alerts_list_browser if not any(re.search(r, x['name']) for r in ignore)]) + format_string = "{:<20}{}" + print("") + print((format_string.format("Browser ID", "Browser Name"))) + for instance_wo_alerts in filtered_missing_alerts_list_browser: + print((format_string.format(instance_wo_alerts["id"], instance_wo_alerts["name"]))) + flag = 1 + sys.exit(flag) + + +if __name__ == '__main__': + controller() + diff --git a/util/jenkins/primary_keys/__init__.py b/util/jenkins/primary_keys/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/util/jenkins/primary_keys/check_primary_keys.py b/util/jenkins/primary_keys/check_primary_keys.py new file mode 100644 index 00000000000..2e9209d8881 --- /dev/null +++ 
b/util/jenkins/primary_keys/check_primary_keys.py @@ -0,0 +1,356 @@ +from __future__ import absolute_import +from __future__ import print_function +import boto3 +from botocore.exceptions import ClientError +import sys +import backoff +import pymysql +import click +from datetime import datetime, timedelta, timezone +from six.moves import range + +MAX_TRIES = 5 +PERIOD = 360 +UNIT = 'Percent' + +class EC2BotoWrapper: + def __init__(self): + self.client = boto3.client("ec2") + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def describe_regions(self): + return self.client.describe_regions() + + +class CwBotoWrapper(): + def __init__(self): + self.client = boto3.client('cloudwatch') + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def list_metrics(self, *args, **kwargs): + return self.client.list_metrics(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def put_metric_data(self, *args, **kwargs): + return self.client.put_metric_data(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def get_metric_stats(self, *args, **kwargs): + return self.client.get_metric_statistics(*args, **kwargs) + + +class RDSBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("rds", **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def describe_db_instances(self): + return self.client.describe_db_instances() + + +class SESBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("ses", **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def send_email(self, *args, **kwargs): + return self.client.send_email(*args, **kwargs) + + +def send_an_email(to_addr, from_addr, primary_keys_message, region): + ses_client = SESBotoWrapper(region_name=region) + + message = """ +

Hello,

+

Primary keys of these tables exhausted soon

+ + + + + + + + """ + for item in range(len(primary_keys_message)): + message += """ + + + + + """.format( + Database=primary_keys_message[item]['database_name'], + Table=primary_keys_message[item]['table_name'], + UsedPercentage=primary_keys_message[item]['percentage_of_PKs_consumed'], + DaysRemaining=primary_keys_message[item]['remaining_days'] if "remaining_days" in primary_keys_message[item] else '' + ) + + message += """
DatabaseTableUsage PercentageRemaining Days
{Database}{Table}{UsedPercentage}{DaysRemaining}
""" + print(("Sending the following as email to {}".format(to_addr))) + print(message) + ses_client.send_email( + Source=from_addr, + Destination={ + 'ToAddresses': [ + to_addr + ] + }, + Message={ + 'Subject': { + 'Data': 'Primary keys of these table would be exhausted soon', + 'Charset': 'utf-8' + }, + 'Body': { + 'Html':{ + 'Data': message, + 'Charset': 'utf-8' + } + } + } + ) + + +def get_rds_from_all_regions(): + """ + Gets a list of RDS instances across all the regions and deployments in AWS + + :returns: + list of all RDS instances across all the regions + [ + { + 'name': name of RDS, + 'Endpoint': Endpoint of RDS + 'Port': Port of RDS + } + ] + name (string) + Endpoint (string) + Port (string) + """ + ec2_client = EC2BotoWrapper() + rds_list = [] + try: + regions_list = ec2_client.describe_regions() + except ClientError as e: + print(("Unable to connect to AWS with error :{}".format(e))) + sys.exit(1) + for region in regions_list["Regions"]: + print(("Getting RDS instances in region {}".format(region["RegionName"]))) + rds_client = RDSBotoWrapper(region_name=region["RegionName"]) + response = rds_client.describe_db_instances() + for instance in response.get('DBInstances'): + temp_dict = dict() + temp_dict["name"] = instance["DBInstanceIdentifier"] + temp_dict["Endpoint"] = instance.get("Endpoint").get("Address") + temp_dict["Port"] = instance.get("Port") + rds_list.append(temp_dict) + return rds_list + + +def check_primary_keys(rds_list, username, password, environment, deploy): + """ + :param rds_list: + :param username: + :param password: + + :returns: + Return list of all tables that cross threshold limit + [ + { + "name": "string", + "db": "string", + "table": "string", + "size": "string", + } + ] + """ + cloudwatch = CwBotoWrapper() + metric_name = 'used_key_space' + namespace = "rds-primary-keys/{}-{}".format(environment, deploy) + try: + table_list = [] + metric_data = [] + tables_reaching_exhaustion_limit = [] + for rds_instance in rds_list: + 
print(("Checking rds instance {}".format(rds_instance["name"]))) + rds_host_endpoint = rds_instance["Endpoint"] + rds_port = rds_instance["Port"] + connection = pymysql.connect(host=rds_host_endpoint, + port=rds_port, + user=username, + password=password) + # prepare a cursor object using cursor() method + cursor = connection.cursor() + # execute SQL query using execute() method. + # this query will return the tables with usage in percentage, result is limited to 10 + cursor.execute(""" + SELECT + table_schema, + table_name, + column_name, + column_type, + auto_increment, + max_int, + ROUND(auto_increment/max_int*100,2) AS used_pct + FROM + ( + SELECT + table_schema, + table_name, + column_name, + column_type, + auto_increment, + pow + (2, + case data_type + when 'tinyint' then 7 + when 'smallint' then 15 + when 'mediumint' then 23 + when 'int' then 31 + when 'bigint' then 63 + end + +(column_type like '% unsigned'))-1 + as max_int + FROM + information_schema.tables t + JOIN information_schema.columns c + USING (table_schema,table_name) + WHERE t.table_schema not in ('mysql','information_schema','performance_schema') + AND t.table_type = 'base table' + AND c.extra LIKE '%auto_increment%' + AND t.auto_increment IS NOT NULL + ) + TMP ORDER BY used_pct desc + LIMIT 10; + """) + rds_result = cursor.fetchall() + cursor.close() + connection.close() + for result_table in rds_result: + table_data = {} + db_name = result_table[0] + table_name = result_table[1] + table_name_combined = "{}.{}".format(db_name, table_name) + table_percent = result_table[6] + if table_percent > 70: + print(("RDS {} Table {}: Primary keys {}% full".format( + rds_instance["name"], table_name_combined, table_percent))) + metric_data.append({ + 'MetricName': metric_name, + 'Dimensions': [{ + "Name": rds_instance["name"], + "Value": table_name_combined + }], + 'Value': table_percent, # percentage of the usage of primary keys + 'Unit': UNIT + }) + table_data["database_name"] = rds_instance['name'] + 
table_data["table_name"] = table_name_combined + table_data["percentage_of_PKs_consumed"] = table_percent + remaining_days_table_name = table_name_combined + # Hack to transition to metric names with db prepended + if table_name == "courseware_studentmodule" and rds_instance["name"] in [ + "prod-edx-edxapp-us-east-1b-2", + "prod-edx-edxapp-us-east-1c-2", + ]: + remaining_days_table_name = table_name + metric_data.append({ + 'MetricName': metric_name, + 'Dimensions': [{ + "Name": rds_instance["name"], + "Value": table_name + }], + 'Value': table_percent, # percentage of the usage of primary keys + 'Unit': UNIT + }) + + remaining_days = get_metrics_and_calcuate_diff(namespace, metric_name, rds_instance["name"], table_name, table_percent) + if remaining_days: + table_data["remaining_days"] = remaining_days + tables_reaching_exhaustion_limit.append(table_data) + if len(metric_data) > 0: + cloudwatch.put_metric_data(Namespace=namespace, MetricData=metric_data) + return tables_reaching_exhaustion_limit + except Exception as e: + print(("Please see the following exception ", e)) + sys.exit(1) + + +def get_metrics_and_calcuate_diff(namespace, metric_name, dimension, value, current_consumption): + cloudwatch = CwBotoWrapper() + res = cloudwatch.get_metric_stats( + Namespace=namespace, + MetricName=metric_name, + Dimensions=[ + { + 'Name': dimension, + 'Value': value + }, + ], + StartTime=datetime.utcnow() - timedelta(days=180), + EndTime=datetime.utcnow(), + Period=86400, + Statistics=[ + 'Maximum', + ], + Unit=UNIT + ) + datapoints = res["Datapoints"] + days_remaining_before_exhaustion = '' + if len(datapoints) > 0: + max_value = max(datapoints, key=lambda x: x['Timestamp']) + time_diff = datetime.now(timezone.utc) - max_value["Timestamp"] + last_max_reading = max_value["Maximum"] + consumed_keys_percentage = 100 - current_consumption + if current_consumption > last_max_reading: + current_usage = current_consumption - last_max_reading + no_of_days = time_diff.days + 
increase_over_time_period = current_usage/no_of_days + days_remaining_before_exhaustion = consumed_keys_percentage/increase_over_time_period + print(("Days remaining for {table} table on db {db}: {days}".format(table=value, + db=dimension, + days=days_remaining_before_exhaustion))) + return days_remaining_before_exhaustion + + + + +@click.command() +@click.option('--username', envvar='USERNAME', required=True) +@click.option('--password', envvar='PASSWORD', required=True) +@click.option('--environment', '-e', required=True) +@click.option('--deploy', '-d', required=True, + help="Deployment (i.e. edx or edge)") +@click.option('--region', multiple=True, help='Default AWS region') +@click.option('--recipient', multiple=True, help='Recipient Email address') +@click.option('--sender', multiple=True, help='Sender email address') +@click.option('--rdsignore', '-i', multiple=True, help='RDS name tags to not check, can be specified multiple times') +def controller(username, password, environment, deploy, region, recipient, sender, rdsignore): + """ + calls other function and calculate the results + :param username: username for the RDS. + :param password: password for the RDS. 
+ :return: None + """ + # get list of all the RDSes across all the regions and deployments + rds_list = get_rds_from_all_regions() + filtered_rds_list = list([x for x in rds_list if x['name'] not in rdsignore]) + table_list = check_primary_keys(filtered_rds_list, username, password, environment, deploy) + if len(table_list) > 0: + send_an_email(recipient[0], sender[0], table_list, region[0]) + sys.exit(0) + + +if __name__ == "__main__": + controller() diff --git a/util/jenkins/primary_keys/requirements.txt b/util/jenkins/primary_keys/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/primary_keys/requirements.txt @@ -0,0 +1 @@ +../requirements.txt \ No newline at end of file diff --git a/util/jenkins/rds_alarms_checker/missing_rds_alarms.py b/util/jenkins/rds_alarms_checker/missing_rds_alarms.py new file mode 100644 index 00000000000..2f7b3cd43c4 --- /dev/null +++ b/util/jenkins/rds_alarms_checker/missing_rds_alarms.py @@ -0,0 +1,104 @@ +from __future__ import absolute_import +from __future__ import print_function +import boto3 +from botocore.exceptions import ClientError +import sys +import backoff +import click + +MAX_TRIES = 5 + + +class EC2BotoWrapper: + def __init__(self): + self.client = boto3.client("ec2") + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_regions(self): + return self.client.describe_regions() + + +class RDSBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("rds", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_db_instances(self): + return self.client.describe_db_instances() + + +class CWBotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("cloudwatch", **kwargs) + + @backoff.on_exception(backoff.expo, ClientError, max_tries=MAX_TRIES) + def describe_alarms(self, **kwargs): + return self.client.describe_alarms(**kwargs) + + +def rds_extractor(): + """ + Return 
list of all RDS instances across all the regions + Returns: + [ + { + 'name': name, + 'ARN': RDS ARN, + 'Region': Region of RDS + } + ] + """ + client_region = EC2BotoWrapper() + rds_list = [] + try: + regions_list = client_region.describe_regions() + except ClientError as e: + print(("Unable to connect to AWS with error :{}".format(e))) + sys.exit(1) + for region in regions_list["Regions"]: + client = RDSBotoWrapper(region_name=region["RegionName"]) + response = client.describe_db_instances() + for instance in response.get('DBInstances'): + temp_dict = {} + temp_dict["name"] = instance["DBInstanceIdentifier"] + temp_dict["ARN"] = instance["DBInstanceArn"] + temp_dict["Region"] = region["RegionName"] + rds_list.append(temp_dict) + return rds_list + + +def cloudwatch_alarm_checker(alarmprefix, region): + """ + Return number of alarms associated with given RDS instance + Returns: + len(alarms): integer + """ + client = CWBotoWrapper(region_name=region) + alarms = client.describe_alarms(AlarmNamePrefix=alarmprefix) + return len(alarms.get('MetricAlarms')) + + +@click.command() +@click.option('--whitelist', type=(str), multiple=True, help='List of Whitelisted RDS') +def controller(whitelist): + """ + Control execution of all other functions + """ + rds = rds_extractor() + missing_alarm = [] + # List of RDS we don't care about + ignore_rds_list = list(whitelist) + for db in rds: + if db["name"] not in ignore_rds_list: + alarms_count = cloudwatch_alarm_checker(db["name"], db["Region"]) + if alarms_count < 1: + missing_alarm.append(db["name"]) + if len(missing_alarm) > 0: + print("RDS Name") + print('\n'.join(str(p) for p in missing_alarm)) + sys.exit(1) + sys.exit(0) + + +if __name__ == '__main__': + controller() diff --git a/util/jenkins/rds_alarms_checker/requirements.txt b/util/jenkins/rds_alarms_checker/requirements.txt new file mode 120000 index 00000000000..dc833dd4bef --- /dev/null +++ b/util/jenkins/rds_alarms_checker/requirements.txt @@ -0,0 +1 @@ 
+../requirements.txt \ No newline at end of file diff --git a/util/jenkins/requirements-celery.txt b/util/jenkins/requirements-celery.txt deleted file mode 100644 index c28cbfc9176..00000000000 --- a/util/jenkins/requirements-celery.txt +++ /dev/null @@ -1,22 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# make upgrade -# -awscli==1.14.32 -backoff==1.4.3 -boto3==1.5.4 -botocore==1.8.36 # via awscli, boto3, s3transfer -click==6.7 -colorama==0.3.7 # via awscli -docutils==0.14 # via awscli, botocore -futures==3.2.0 ; python_version == "2.7" -jmespath==0.9.3 # via boto3, botocore -pyasn1==0.4.2 # via rsa -python-dateutil==2.7.3 # via botocore -pyyaml==3.12 # via awscli -redis==2.10.6 -rsa==3.4.2 # via awscli -s3transfer==0.1.13 # via awscli, boto3 -six==1.11.0 # via python-dateutil diff --git a/util/jenkins/requirements-cloudflare.txt b/util/jenkins/requirements-cloudflare.txt index 773d9dd78f8..e1fc23ca3a1 100644 --- a/util/jenkins/requirements-cloudflare.txt +++ b/util/jenkins/requirements-cloudflare.txt @@ -4,4 +4,8 @@ # # make upgrade # -requests==2.9.1 +certifi==2020.4.5.1 # via requests +chardet==3.0.4 # via requests +idna==2.7 # via requests +requests==2.20.0 # via -r requirements/cloudflare.in +urllib3==1.24.3 # via requests diff --git a/util/jenkins/requirements.txt b/util/jenkins/requirements.txt new file mode 100644 index 00000000000..51ff9793bfc --- /dev/null +++ b/util/jenkins/requirements.txt @@ -0,0 +1,39 @@ +# +# This file is autogenerated by pip-compile +# To update, run: +# +# make upgrade +# +amqp==1.4.9 # via kombu +anyjson==0.3.3 # via kombu +awscli==1.14.32 +backoff==1.4.3 +billiard==3.3.0.23 # via celery +boto3==1.5.4 +botocore==1.8.36 # via awscli, boto3, s3transfer +celery==3.1.25 +certifi==2019.11.28 # via opsgenie-sdk, requests +chardet==3.0.4 # via requests +click==6.7 +colorama==0.3.7 # via awscli +docutils==0.15.2 # via awscli, botocore +futures==3.3.0 ; python_version == "2.7" +idna==2.8 # via requests 
+jmespath==0.9.4 # via boto3, botocore +kombu==3.0.37 # via celery +opsgenie-sdk==0.3.1 +pyasn1==0.4.8 # via rsa +PyMySQL==0.9.3 +python-dateutil==2.8.1 # via botocore, opsgenie-sdk +pytz==2019.3 # via celery, opsgenie-sdk +pyyaml==3.12 # via awscli +redis==2.10.6 +requests==2.22.0 # via opsgenie-sdk +rsa==3.4.2 # via awscli +splunk-sdk==1.6.6 +s3transfer==0.1.13 # via awscli, boto3 +six==1.13.0 # via opsgenie-sdk, python-dateutil +urllib3==1.25.7 # via opsgenie-sdk, requests + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/util/jenkins/update_celery_monitoring/create_celery_dashboard.py b/util/jenkins/update_celery_monitoring/create_celery_dashboard.py new file mode 100644 index 00000000000..0b1668d14b4 --- /dev/null +++ b/util/jenkins/update_celery_monitoring/create_celery_dashboard.py @@ -0,0 +1,224 @@ +from __future__ import absolute_import +from __future__ import print_function +import pprint +import re + +import boto3 +import botocore +import backoff +import click +import json + +MAX_TRIES = 1 + +class CwBotoWrapper(object): + def __init__(self): + self.client = boto3.client('cloudwatch') + + @backoff.on_exception(backoff.expo, + (botocore.exceptions.ClientError), + max_tries=MAX_TRIES) + def list_metrics(self, *args, **kwargs): + return self.client.list_metrics(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + (botocore.exceptions.ClientError), + max_tries=MAX_TRIES) + def put_dashboard(self, *args, **kwargs): + return self.client.put_dashboard(*args, **kwargs) + +def generate_dashboard_widget_metrics(cloudwatch, namespace, metric_name, dimension_name, properties={}, include_filter=None, exclude_filter=None, right_axis_items=[]): + # https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html#CloudWatch-Dashboard-Properties-Metrics-Array-Format + # [Namespace, MetricName, [{DimensionName,DimensionValue}...] 
[Rendering Properties Object] ] + # ['AWS/EC2', 'CPUUtilization', 'AutoScalingGroupName', 'asg-name', {'period': 60}] + pp = pprint.PrettyPrinter(indent=4) + + metrics = cloudwatch.list_metrics( + Namespace=namespace, MetricName=metric_name, Dimensions=[{"Name": dimension_name}] + ) + + values = [] + + for metric in metrics['Metrics']: + for dimension in metric['Dimensions']: + if dimension['Name'] == dimension_name: + if include_filter is None or re.search(include_filter, dimension['Value'], re.IGNORECASE): + if exclude_filter is None or not re.search(exclude_filter, dimension['Value'], re.IGNORECASE): + values.append(dimension['Value']) + + values.sort() + + new_widget_metrics = [] + for value in values: + value_properties = properties.copy() + value_properties['label'] = value + if value in right_axis_items: + value_properties["yAxis"] = "right" + new_widget_metrics.append([namespace, metric_name, dimension_name, value, value_properties]) + + return new_widget_metrics + +# * means that all arguments after cloudwatch are keyword arguments only and are not positional +def generate_dashboard_widget( + cloudwatch, + *, + x=0, + y, + title, + namespace, + metric_name, + dimension_name, + metrics_properties={}, + include_filter=None, + exclude_filter=None, + height, + width=24, + stacked=False, + region='us-east-1', + period=60, + right_axis_items=[] +): + return { 'type': 'metric', 'height': height, 'width': width, 'x': x, 'y': y, + 'properties': { + 'period': period, 'view': 'timeSeries', 'stacked': stacked, 'region': region, + 'title': "{} (auto-generated)".format(title), + 'metrics': generate_dashboard_widget_metrics(cloudwatch, namespace, metric_name, dimension_name, metrics_properties, + include_filter=include_filter, exclude_filter=exclude_filter, right_axis_items=right_axis_items) + } +} + +@click.command() +@click.option('--environment', '-e', required=True) +@click.option('--deploy', '-d', required=True, + help="Deployment (i.e. 
edx or edge)") +def generate_dashboard(environment, deploy): + pp = pprint.PrettyPrinter(indent=4) + cloudwatch = CwBotoWrapper() + + dashboard_name = "{}-{}-queues".format(environment, deploy) + celery_namespace = "celery/{}-{}".format(environment, deploy) + xqueue_namespace = "xqueue/{}-{}".format(environment, deploy) + + widgets = [] + width = 24 + y_cord = 0 + region = "us-east-1" + right_axis_items=["edx.lms.core.ace", "edx.lms.core.background_process", "notifier.default"] + right_axis_items_age=["notifier.default"] + + height = 9 + + cpu_widget = generate_dashboard_widget(cloudwatch, y=y_cord, height=height, + title="{}-{}-Worker ASG Average CPU".format(environment, deploy), + namespace="AWS/EC2", metric_name="CPUUtilization", dimension_name="AutoScalingGroupName", + include_filter="{}-{}-Worker".format(environment, deploy) + ) + + cpu_widget['properties']['annotations'] = { + 'horizontal': [ + {'label': 'Scale Up', 'value': 90, 'color': '#d62728'}, + {'label': 'Scale Down', 'value': 45, 'color': '#2ca02c'} + ] + } + + cpu_widget['properties']['yAxis'] = {'left': {'min': 0, 'max': 100}} + + widgets.append(cpu_widget) + + y_cord += height + height = 6 + + worker_count_widget = generate_dashboard_widget(cloudwatch, y=y_cord, height=height, + title="{}-{}-Worker Count".format(environment, deploy), + namespace=celery_namespace, metric_name="count", dimension_name="workers" + ) + + worker_count_widget['properties']['stacked'] = True + + widgets.append(worker_count_widget) + + y_cord += height + height = 9 + + widgets.append(generate_dashboard_widget(cloudwatch, y=y_cord, height=height, + title="{}-{} All Celery Queues".format(environment, deploy), + namespace=celery_namespace, metric_name="queue_length", dimension_name="queue", + right_axis_items=right_axis_items + )) + + y_cord += height + height = 9 + + widgets.append(generate_dashboard_widget(cloudwatch, y=y_cord, height=height, + title="{}-{} All Queues Next Task Age".format(environment, deploy), + 
namespace=celery_namespace, metric_name="next_task_age", dimension_name="queue", + right_axis_items=right_axis_items_age + )) + + y_cord += height + height = 9 + + widgets.append(generate_dashboard_widget(cloudwatch, y=y_cord, height=height, + title="{}-{} LMS Queues".format(environment, deploy), + namespace=celery_namespace, metric_name="queue_length", dimension_name="queue", + include_filter="^edx.lms", + right_axis_items=right_axis_items + )) + + y_cord += height + height = 9 + + widgets.append(generate_dashboard_widget(cloudwatch, y=y_cord, height=height, + title="{}-{} CMS Queues".format(environment, deploy), + namespace=celery_namespace, metric_name="queue_length", dimension_name="queue", + include_filter="^edx.cms", + right_axis_items=right_axis_items + )) + + if deploy == 'edx' and environment == 'prod': + y_cord += height + height = 9 + + widgets.append(generate_dashboard_widget(cloudwatch, y=y_cord, height=height, + title="{}-{} Xqueue Queues".format(environment, deploy), + namespace=xqueue_namespace, metric_name="queue_length", dimension_name="queue", + )) + + + if deploy in ["edx", "edge"]: + y_cord += height + height = 9 + + widgets.append(generate_dashboard_widget(cloudwatch, y=y_cord, height=height, + title="{}-{} Ecommerce".format(environment, deploy), + namespace=celery_namespace, metric_name="queue_length", dimension_name="queue", + include_filter="^ecommerce\.", + )) + + y_cord += height + height = 9 + + widgets.append(generate_dashboard_widget(cloudwatch, y=y_cord, height=height, + title="{}-{} Notifier".format(environment, deploy), + namespace=celery_namespace, metric_name="queue_length", dimension_name="queue", + include_filter="^notifier\.", + )) + + y_cord += height + height = 9 + + widgets.append(generate_dashboard_widget(cloudwatch, y=y_cord, height=height, + title="{}-{} Legacy Celery (Notifier/Ecommerce) should be 0".format(environment, deploy), + namespace=celery_namespace, metric_name="queue_length", dimension_name="queue", + 
include_filter="celery", + )) + + dashboard_body = { 'widgets': widgets } + + print("Dasboard Body") + pp.pprint(dashboard_body) + + cloudwatch.put_dashboard(DashboardName=dashboard_name, DashboardBody=json.dumps(dashboard_body)) + +if __name__ == '__main__': + generate_dashboard() diff --git a/util/jenkins/update_celery_monitoring/requirements.txt b/util/jenkins/update_celery_monitoring/requirements.txt new file mode 100644 index 00000000000..5e3417a5461 --- /dev/null +++ b/util/jenkins/update_celery_monitoring/requirements.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile +# To update, run: +# +# make upgrade +# +awscli==1.14.32 # via -r requirements/celery.in +backoff==1.4.3 # via -r requirements/celery.in +boto3==1.5.4 # via -r requirements/celery.in +botocore==1.8.36 # via awscli, boto3, s3transfer +click==6.7 # via -r requirements/celery.in +colorama==0.3.7 # via awscli +docutils==0.16 # via awscli, botocore +jmespath==0.9.5 # via boto3, botocore +pyasn1==0.4.8 # via rsa +python-dateutil==2.8.1 # via botocore +pyyaml==3.12 # via awscli +redis==2.10.6 # via -r requirements/celery.in +rsa==3.4.2 # via awscli +s3transfer==0.1.13 # via awscli, boto3 +six==1.14.0 # via python-dateutil diff --git a/util/jenkins/update_celery_monitoring/update_celery_monitoring.py b/util/jenkins/update_celery_monitoring/update_celery_monitoring.py new file mode 100644 index 00000000000..696d41b529b --- /dev/null +++ b/util/jenkins/update_celery_monitoring/update_celery_monitoring.py @@ -0,0 +1,292 @@ +from __future__ import absolute_import +from __future__ import print_function +import re +import redis +import click +import boto3 +import botocore +import backoff +from pprint import pprint +from itertools import zip_longest +from collections import defaultdict + +MAX_TRIES = 5 + +# Queues that should be gone. 
Inclusion in this list will stop this script from +# zero filling them, but if they are >0 they will still get tracked +queue_blacklist = ['celery', 'ecommerce'] + + +class RedisWrapper(object): + def __init__(self, *args, **kwargs): + self.redis = redis.StrictRedis(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def keys(self): + return list(self.redis.keys()) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def type(self, key): + return self.redis.type(key) + + @backoff.on_exception(backoff.expo, + (redis.exceptions.TimeoutError, + redis.exceptions.ConnectionError), + max_tries=MAX_TRIES) + def llen(self, key): + return self.redis.llen(key) + + +class CwBotoWrapper(object): + def __init__(self, dev_test_mode=True): + self.dev_test_mode = dev_test_mode + self.client = boto3.client('cloudwatch') + + @backoff.on_exception(backoff.expo, + (botocore.exceptions.ClientError), + max_tries=MAX_TRIES) + def list_metrics(self, *args, **kwargs): + return self.client.list_metrics(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + (botocore.exceptions.ClientError), + max_tries=MAX_TRIES) + def put_metric_data(self, *args, **kwargs): + if self.dev_test_mode: + print(("Test Mode: would have run put_metric_data({},{})".format(args, kwargs))) + else: + return self.client.put_metric_data(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + (botocore.exceptions.ClientError), + max_tries=MAX_TRIES) + def describe_alarms(self, *args, **kwargs): + return self.client.describe_alarms(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + (botocore.exceptions.ClientError), + max_tries=MAX_TRIES) + def put_metric_alarm(self, *args, **kwargs): + if self.dev_test_mode: + print(("Test Mode: would have run put_metric_alarm({},{})".format(args, kwargs))) + else: + return 
self.client.put_metric_alarm(*args, **kwargs) + + +class Ec2BotoWrapper(object): + def __init__(self): + self.client = boto3.client('ec2') + + @backoff.on_exception(backoff.expo, + (botocore.exceptions.ClientError), + max_tries=MAX_TRIES) + def describe_instances(self, *args, **kwargs): + return self.client.describe_instances(*args, **kwargs) + + +def count_workers(environment, deploy, cluster): + ec2 = Ec2BotoWrapper() + + counts_by_play = defaultdict(int) + + reservations = ec2.describe_instances( + Filters=[ + {'Name': 'tag:environment', 'Values': [environment]}, + {'Name': 'tag:deployment', 'Values': [deploy]}, + {'Name': 'tag:cluster', 'Values': [cluster]}, + {'Name': 'instance-state-name', 'Values': ['running']}, + ] + )['Reservations'] + + for reservation in reservations: + for instance in reservation["Instances"]: + tag_asg = None + for tag in instance['Tags']: + if tag.get('Key') == 'aws:autoscaling:groupName': + # Reduce number of metrics from 1000 to 10 by changing first 2 numbers of ASG version to stars + # This reduces the cloudwatch cost + tag_asg = re.sub('-v[0-9]{2}', '-v**', tag.get('Value')) + counts_by_play[tag_asg] += 1 + + metric_data = [] + + for play, num_workers in counts_by_play.items(): + metric_data.append({ + 'MetricName': 'count', + 'Dimensions': [{ + "Name": "workers", + "Value": play + }], + 'Value': num_workers + } + ) + + return metric_data + + +@click.command() +@click.option('--host', '-h', default='localhost', + help='Hostname of redis server') +@click.option('--port', '-p', default=6379, help='Port of redis server') +@click.option('--environment', '-e', required=True) +@click.option('--deploy', '-d', required=True, + help="Deployment (i.e. 
edx or edge)") +@click.option('--max-metrics', default=20, + help='Maximum number of CloudWatch metrics to publish') +@click.option('--threshold', default=50, + help='Default maximum queue length before alarm notification is' + + ' sent') +@click.option('--queue-threshold', type=(str, int), multiple=True, + help='Threshold per queue in format --queue-threshold' + + ' {queue_name} {threshold}. May be used multiple times') +@click.option('--sns-arn', '-s', help='ARN for SNS alert topic', required=True) +@click.option('--dev-test-mode', is_flag=True, help='Enable dev (no-op) mode') +def check_queues(host, port, environment, deploy, max_metrics, threshold, + queue_threshold, sns_arn, dev_test_mode): + + thresholds = dict(queue_threshold) + + timeout = 1 + redis_client = RedisWrapper(host=host, port=port, socket_timeout=timeout, + socket_connect_timeout=timeout) + cloudwatch = CwBotoWrapper(dev_test_mode=dev_test_mode) + namespace = "celery/{}-{}".format(environment, deploy) + metric_name = 'queue_length' + dimension = 'queue' + response = cloudwatch.list_metrics(Namespace=namespace, + MetricName=metric_name, + Dimensions=[{'Name': dimension}]) + existing_queues = [] + for m in response["Metrics"]: + existing_queues.extend( + [d['Value'] for d in m["Dimensions"] if ( + d['Name'] == dimension and + not d['Value'] in queue_blacklist and + not d['Value'].endswith(".pidbox") and + not d['Value'].startswith("_kombu"))]) + + redis_queues = set([k.decode() for k in redis_client.keys() + if (redis_client.type(k) == b'list' and + not k.decode().endswith(".pidbox") and + not k.decode().startswith("_kombu"))]) + + all_queues = existing_queues + list( + set(redis_queues).difference(existing_queues) + ) + + metric_data = [] + + for queue_name in all_queues: + metric_data.append({ + 'MetricName': metric_name, + 'Dimensions': [{ + "Name": dimension, + "Value": queue_name + }], + 'Value': redis_client.llen(queue_name), + 'Unit': 'Count', + }) + + if len(metric_data) > 0: + for 
metric_data_grouped in grouper(metric_data, max_metrics): + print("metric_data:") + pprint(metric_data_grouped, width=120) + cloudwatch.put_metric_data(Namespace=namespace, MetricData=metric_data_grouped) + + for queue in all_queues: + dimensions = [{'Name': dimension, 'Value': queue}] + queue_threshold = threshold + if queue in thresholds: + queue_threshold = thresholds[queue] + # Period is in seconds + period = 60 + evaluation_periods = 15 + comparison_operator = "GreaterThanThreshold" + treat_missing_data = "notBreaching" + statistic = "Maximum" + actions = [sns_arn] + alarm_name = "{}-{} {} queue length over threshold".format(environment, + deploy, + queue) + + existing_alarms = cloudwatch.describe_alarms(AlarmNames=[alarm_name])['MetricAlarms'] + do_put_alarm = False + if len(existing_alarms) > 1: + print(("WARNING: found multiple existing alarms for {}".format(alarm_name))) + pprint(existing_alarms) + do_put_alarm = True + elif len(existing_alarms) == 1: + existing_alarm = existing_alarms[0] + + if (existing_alarm.get('Threshold') != queue_threshold or + existing_alarm.get('AlarmDescription') != alarm_name or + existing_alarm.get('Namespace') != namespace or + existing_alarm.get('MetricName') != metric_name or + existing_alarm.get('Dimensions') != dimensions or + existing_alarm.get('Period') != period or + existing_alarm.get('EvaluationPeriods') != evaluation_periods or + existing_alarm.get('TreatMissingData') != treat_missing_data or + existing_alarm.get('ComparisonOperator') != comparison_operator or + existing_alarm.get('Statistic') != statistic): + do_put_alarm = True + print("1") + elif not (len(existing_alarm.get('InsufficientDataActions')) == 1 and + existing_alarm.get('InsufficientDataActions')[0] == actions[0]): + do_put_alarm = True + elif not (len(existing_alarm.get('OKActions')) == 1 and + existing_alarm.get('OKActions')[0] == actions[0]): + do_put_alarm = True + elif not (len(existing_alarm.get('AlarmActions')) == 1 and + existing_alarm.get('AlarmActions')[0] 
== actions[0]): + do_put_alarm = True + if do_put_alarm: + print(('Updating existing alarm "{}"'.format(alarm_name))) + else: + do_put_alarm = True + print(('Creating new alarm "{}"'.format(alarm_name))) + if not do_put_alarm: + print(('Not updating alarm "{}", no changes'.format(alarm_name))) + else: + print(('put_alarm_metric: {}'.format(alarm_name))) + cloudwatch.put_metric_alarm(AlarmName=alarm_name, + AlarmDescription=alarm_name, + Namespace=namespace, + MetricName=metric_name, + Dimensions=dimensions, + Period=period, + EvaluationPeriods=evaluation_periods, + TreatMissingData=treat_missing_data, + Threshold=queue_threshold, + ComparisonOperator=comparison_operator, + Statistic=statistic, + InsufficientDataActions=actions, + OKActions=actions, + AlarmActions=actions) + + # Track number of worker instances so it can be graphed in CloudWatch + workers_metric_data = count_workers(environment, deploy, 'worker') + print("workers_metric_data:") + pprint(workers_metric_data, width=120) + cloudwatch.put_metric_data(Namespace=namespace, MetricData=workers_metric_data) + + +# Stolen right from the itertools recipes +# https://docs.python.org/3/library/itertools.html#itertools-recipes +def grouper(iterable, n, fillvalue=None): + "Collect data into fixed-length chunks or blocks" + # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx" + args = [iter(iterable)] * n + chunks = zip_longest(*args, fillvalue=fillvalue) + # Remove Nones in function + for chunk in chunks: + yield [v for v in chunk if v is not None] + + +if __name__ == '__main__': + check_queues() diff --git a/util/maintenance.sh b/util/maintenance.sh new file mode 100755 index 00000000000..ffe40a1bef2 --- /dev/null +++ b/util/maintenance.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +usage() { + echo "Usage: $0 environment-deploy (enable|disable)" + echo + echo "Examples:" + echo " Turn on maintenance page for stage-edx: $0 stage-edx enable" + echo " Turn off maintenance page for stage-edx: $0 stage-edx disable" + exit 1 +} + 
+ED=$1 +ENABLE_ARG=$2 + +case $ED in + loadtest-edx|stage-edx|prod-edx|prod-edge) + ;; + *) + echo "ERROR: environment-deploy must be one of loadtest-edx, stage-edx, prod-edx or prod-edge" + echo + usage + ;; +esac + +case $ENABLE_ARG in + enable) + ENABLE="True" + ;; + disable) + ENABLE="False" + ;; + *) + echo "ERROR: must specify enable or disable" + echo + usage + ;; +esac + +INVENTORY=$(aws ec2 describe-instances --filter "Name=tag:Name,Values=${ED}-edxapp,${ED}-studio,${ED}-worker" --query 'Reservations[].Instances[].PrivateIpAddress' --output text | tr '\t' ',') +ENABLE_EXTRA_VAR="{\"ENABLE_MAINTENANCE\": ${ENABLE}}" + +ansible-playbook ./edx_maintenance.yml -i "${INVENTORY}," -e "${ENABLE_EXTRA_VAR}" diff --git a/util/old/import_xml_courses.py b/util/old/import_xml_courses.py index 5d1e1e16255..20436b40ffc 100644 --- a/util/old/import_xml_courses.py +++ b/util/old/import_xml_courses.py @@ -13,6 +13,8 @@ # version: #} +from __future__ import absolute_import +from __future__ import print_function import argparse from os.path import basename import yaml @@ -63,4 +65,4 @@ "EDXAPP_XML_MAPPINGS": all_xml_mappings, "EDXAPP_XML_FROM_GIT": True } - print yaml.safe_dump(edxapp_xml_courses, default_flow_style=False) + print(yaml.safe_dump(edxapp_xml_courses, default_flow_style=False)) diff --git a/util/packer/jenkins_worker.json b/util/packer/jenkins_worker.json index 316db2942aa..aed27e975ad 100644 --- a/util/packer/jenkins_worker.json +++ b/util/packer/jenkins_worker.json @@ -2,7 +2,7 @@ "variables": { "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", - "new_relic_key": "{{env `NEW_RELIC_KEY`}}", + "new_relic_infrastructure_license_key": "{{env `NEWRELIC_INFRASTRUCTURE_LICENSE_KEY`}}", "playbook_remote_dir": "/tmp/packer-edx-playbooks", "venv_dir": "/edx/app/edx_ansible/venvs/edx_ansible", "ami": "{{env `JENKINS_WORKER_AMI`}}", @@ -63,7 +63,7 @@ "command": ". 
{{user `venv_dir`}}/bin/activate && ansible-playbook", "inventory_groups": "jenkins_worker", "extra_arguments": [ - "-e \"jenkins_edx_platform_version={{user `test_platform_version`}} NEWRELIC_LICENSE_KEY={{user `new_relic_key`}} initialize_replica_set=false mongo_configure_replica_set=false jenkins_worker_key_url='{{user `jenkins_worker_key_url`}}'\"", + "-e \"jenkins_edx_platform_version={{user `test_platform_version`}} NEWRELIC_INFRASTRUCTURE_LICENSE_KEY={{user `new_relic_infrastructure_license_key`}} initialize_replica_set=false mongo_configure_replica_set=false jenkins_worker_key_url='{{user `jenkins_worker_key_url`}}'\"", "-vvv" ] }, { diff --git a/util/packer/jenkins_worker_android.json b/util/packer/jenkins_worker_android.json index 0c07fde3698..71774503a08 100644 --- a/util/packer/jenkins_worker_android.json +++ b/util/packer/jenkins_worker_android.json @@ -2,7 +2,7 @@ "variables": { "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", - "new_relic_key": "{{env `NEW_RELIC_KEY`}}", + "new_relic_infrastructure_license_key": "{{env `NEWRELIC_INFRASTRUCTURE_LICENSE_KEY`}}", "playbook_remote_dir": "/tmp/packer-edx-playbooks", "venv_dir": "/edx/app/edx_ansible/venvs/edx_ansible", "ami": "{{env `JENKINS_WORKER_AMI`}}", @@ -62,7 +62,7 @@ "command": ". 
{{user `venv_dir`}}/bin/activate && ansible-playbook", "inventory_groups": "jenkins_worker", "extra_arguments": [ - "-e \"NEWRELIC_LICENSE_KEY={{user `new_relic_key`}} jenkins_worker_key_url='{{user `jenkins_worker_key_url`}}'\"", + "-e \"NEWRELIC_INFRASTRUCTURE_LICENSE_KEY={{user `new_relic_infrastructure_license_key`}} jenkins_worker_key_url='{{user `jenkins_worker_key_url`}}'\"", "-vvv" ] }] diff --git a/util/packer/jenkins_worker_loadtest.json b/util/packer/jenkins_worker_codejail.json similarity index 82% rename from util/packer/jenkins_worker_loadtest.json rename to util/packer/jenkins_worker_codejail.json index 988c69e2dae..5dbff1ba763 100644 --- a/util/packer/jenkins_worker_loadtest.json +++ b/util/packer/jenkins_worker_codejail.json @@ -2,7 +2,7 @@ "variables": { "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", - "new_relic_key": "{{env `NEW_RELIC_KEY`}}", + "new_relic_infrastructure_license_key": "{{env `NEWRELIC_INFRASTRUCTURE_LICENSE_KEY`}}", "playbook_remote_dir": "/tmp/packer-edx-playbooks", "venv_dir": "/edx/app/edx_ansible/venvs/edx_ansible", "ami": "{{env `JENKINS_WORKER_AMI`}}", @@ -15,12 +15,12 @@ "type": "amazon-ebs", "access_key": "{{user `aws_access_key`}}", "secret_key": "{{user `aws_secret_key`}}", - "ami_name": "jenkins_worker_loadtest_driver-{{isotime | clean_ami_name}}", + "ami_name": "jenkins_worker_codejail-{{isotime | clean_ami_name}}", "instance_type": "m3.medium", "region": "us-east-1", "source_ami": "{{user `ami`}}", "ssh_username": "ubuntu", - "ami_description": "jenkins worker loadtest driver", + "ami_description": "jenkins worker codejail", "iam_instance_profile": "jenkins-worker", "security_group_id": "{{user `security_group`}}", "tags": { @@ -57,12 +57,12 @@ "command": "rm ../../playbooks/edx-east" }, { "type": "ansible-local", - "playbook_file": "../../playbooks/jenkins_worker_loadtest_driver.yml", + "playbook_file": "../../playbooks/jenkins_worker_codejail.yml", 
"playbook_dir": "../../playbooks", "command": ". {{user `venv_dir`}}/bin/activate && ansible-playbook", "inventory_groups": "jenkins_worker", "extra_arguments": [ - "-e \"NEWRELIC_LICENSE_KEY={{user `new_relic_key`}} jenkins_worker_key_url='{{user `jenkins_worker_key_url`}}'\"", + "-e \"NEWRELIC_INFRASTRUCTURE_LICENSE_KEY={{user `new_relic_infrastructure_license_key`}} jenkins_worker_key_url='{{user `jenkins_worker_key_url`}}'\"", "-vvv" ] }] diff --git a/util/packer/jenkins_worker_simple.json b/util/packer/jenkins_worker_simple.json index 63d6e69327e..2d0fca29a9e 100644 --- a/util/packer/jenkins_worker_simple.json +++ b/util/packer/jenkins_worker_simple.json @@ -2,7 +2,7 @@ "variables": { "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", - "new_relic_key": "{{env `NEW_RELIC_KEY`}}", + "new_relic_infrastructure_license_key": "{{env `NEWRELIC_INFRASTRUCTURE_LICENSE_KEY`}}", "playbook_remote_dir": "/tmp/packer-edx-playbooks", "venv_dir": "/edx/app/edx_ansible/venvs/edx_ansible", "ami": "{{env `JENKINS_WORKER_AMI`}}", @@ -63,7 +63,7 @@ "command": ". 
{{user `venv_dir`}}/bin/activate && ansible-playbook", "inventory_groups": "jenkins_worker", "extra_arguments": [ - "-e \"jenkins_edx_platform_version={{user `test_platform_version`}} NEWRELIC_LICENSE_KEY={{user `new_relic_key`}} initialize_replica_set=false mongo_configure_replica_set=false jenkins_worker_key_url='{{user `jenkins_worker_key_url`}}'\"", + "-e \"jenkins_edx_platform_version={{user `test_platform_version`}} NEWRELIC_INFRASTRUCTURE_LICENSE_KEY={{user `new_relic_infrastructure_license_key`}} initialize_replica_set=false mongo_configure_replica_set=false jenkins_worker_key_url='{{user `jenkins_worker_key_url`}}'\"", "-vvv" ] }] diff --git a/util/parsefiles.py b/util/parsefiles.py index 585b6330d0d..f667aab5a3d 100644 --- a/util/parsefiles.py +++ b/util/parsefiles.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import +from __future__ import print_function import os import pathlib2 import logging @@ -6,6 +8,7 @@ import networkx as nx from collections import namedtuple import argparse +import six TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR") DOCKER_PATH_ROOT = pathlib2.Path(TRAVIS_BUILD_DIR, "docker", "build") @@ -145,9 +148,9 @@ def _open_yaml_file(file_str): with (file_str.open(mode='r')) as file: try: - yaml_file = yaml.load(file) + yaml_file = yaml.safe_load(file) return yaml_file - except yaml.YAMLError, exc: + except yaml.YAMLError as exc: LOGGER.error("error in configuration file: %s" % str(exc)) sys.exit(1) @@ -333,7 +336,7 @@ def _get_role_name(role): """ if isinstance(role, dict): return role['role'] - elif isinstance(role, basestring): + elif isinstance(role, six.string_types): return role else: LOGGER.warning("role %s could not be resolved to a role name." 
% role) @@ -464,4 +467,4 @@ def arg_parse(): all_plays = set(set(docker_plays) | set( modified_docker_files) | set(docker_plays_dir)) - print " ".join(all_plays) + print(" ".join(all_plays)) diff --git a/util/parsefiles_config.yml b/util/parsefiles_config.yml index 7bebc7c303d..580c899d752 100644 --- a/util/parsefiles_config.yml +++ b/util/parsefiles_config.yml @@ -4,11 +4,13 @@ aws_plays_paths: - playbooks docker_plays_paths: - docker/plays +docker_ignore_list: + - go-agent + - go-agent-marketing + - go-agent-frontend weights: - discovery: 6 - - go-agent: 3 - go-server: 5 - - go-agent-marketing: 4 - xqwatcher: 3 - analytics_api: 1 - edxapp: 28 @@ -19,6 +21,7 @@ weights: - xqueue: 2 - trusty-common: 5 - xenial-common: 6 + - bionic-common: 6 - ecommerce: 6 - rabbitmq: 2 - automated: 1 @@ -41,3 +44,9 @@ weights: - analytics_pipeline_hadoop_resourcemanager: 2 - analytics_pipeline_spark_master: 1 - analytics_pipeline_spark_worker: 1 + - chrome: 1 + - firefox: 1 + - flower: 1 + - registrar: 3 + - designer: 3 + - enterprise_catalog: 3 diff --git a/util/pingdom/create_pingdom_alerts.py b/util/pingdom/create_pingdom_alerts.py index 5bdc1a2df4c..e689193eaa5 100644 --- a/util/pingdom/create_pingdom_alerts.py +++ b/util/pingdom/create_pingdom_alerts.py @@ -1,9 +1,12 @@ +from __future__ import absolute_import +from __future__ import print_function import json import click import yaml import requests import json +from six.moves import map class PingdomInvalidResponse(Exception): @@ -28,7 +31,7 @@ def main(noop, pingdom_email, pingdom_password, pingdom_api_key, alert_config_file): with open(alert_config_file, 'r') as stream: - config_file_content = yaml.load(stream) + config_file_content = yaml.safe_load(stream) config_file_content = replace_user_names_with_userids(pingdom_email, pingdom_password, pingdom_api_key, @@ -38,16 +41,16 @@ def main(noop, pingdom_email, pingdom_password, checks_by_hostname = build_checks_by_hostname(pingdom_email, pingdom_password, pingdom_api_key) - 
hostnames = checks_by_hostname.keys() + hostnames = list(checks_by_hostname.keys()) for alert_config in config_file_content['checks']: if alert_config['host'] not in hostnames: # Create new check if noop: - print("Would CREATE: {0}, but you set the noop flag.".format( - alert_config)) + print(("Would CREATE: {0}, but you set the noop flag.".format( + alert_config))) else: - print("CREATE: {0}".format(alert_config)) + print(("CREATE: {0}".format(alert_config))) create_check(pingdom_email, pingdom_password, pingdom_api_key, alert_config) @@ -55,12 +58,12 @@ def main(noop, pingdom_email, pingdom_password, # Updating existing check existing_check = checks_by_hostname[alert_config['host']] if noop: - print(""" + print((""" Has changes, would UPDATE: {0}, but you set the noop flag. - """.format(alert_config)) + """.format(alert_config))) else: - print("Attempting UPDATE: {0}".format(alert_config)) + print(("Attempting UPDATE: {0}".format(alert_config))) # We always update because the parameters to POST check # and the paramters returned by GET check differ. 
# It would be difficult to figure out if changes @@ -83,7 +86,7 @@ def replace_user_names_with_userids(pingdom_email, for user in alert['users']: if 'userids' in alert: user_ids.extend( - map(lambda x: x.strip(), alert['userids'].split(','))) + [x.strip() for x in alert['userids'].split(',')]) if user not in user_ids_by_name: raise PingdomInvalidResponse( 'Pingdom has no user with the name {0}'.format(user)) @@ -103,7 +106,7 @@ def integration_names_to_ids(config_file_content): if('integrationids' in alert): integration_ids.extend( alert['integrationids'].split(',')) - if integration not in integration_ids_by_name.keys(): + if integration not in list(integration_ids_by_name.keys()): print( """ You specified a integration @@ -213,15 +216,15 @@ def build_userid_by_name(pingdom_email, pingdom_password, pingdom_api_key): def print_request_and_response(response): print("Request:") for key in response.request.headers: - print("{0}: {1}".format(key, response.request.headers[key])) + print(("{0}: {1}".format(key, response.request.headers[key]))) print("") - print(response.request.body) + print((response.request.body)) print("------------------") print("Response:") for key in response.headers: - print("{0}: {1}".format(key, response.headers[key])) + print(("{0}: {1}".format(key, response.headers[key]))) print("") - print(response.content.decode('utf-8')) + print((response.content.decode('utf-8'))) print("------------------") diff --git a/util/pingdom/requirements.txt b/util/pingdom/requirements.txt index 503ff91506c..a219eadfd80 100644 --- a/util/pingdom/requirements.txt +++ b/util/pingdom/requirements.txt @@ -4,10 +4,11 @@ # # make upgrade # -certifi==2018.4.16 # via requests +certifi==2020.4.5.1 # via requests chardet==3.0.4 # via requests -click==6.7 -idna==2.6 # via requests -pyyaml==3.12 -requests==2.18.4 -urllib3==1.22 # via requests +click==6.7 # via -r requirements/pingdom.in +idna==2.7 # via requests +pyyaml==3.12 # via -r requirements/pingdom.in +requests==2.20.0 
# via -r requirements/pingdom.in +six==1.14.0 # via -r requirements/pingdom.in +urllib3==1.24.3 # via requests diff --git a/util/publish_rds_logs_to_cloudwatch.py b/util/publish_rds_logs_to_cloudwatch.py new file mode 100755 index 00000000000..1c1d1923275 --- /dev/null +++ b/util/publish_rds_logs_to_cloudwatch.py @@ -0,0 +1,65 @@ +#!/usr/bin/python3 +""" +Publish RDS logs to cloudwatch +Example: + ./publish_rds_logs_to_cloudwatch --db_engine mysql --db_identifier edx-mysql-db + ./publish_rds_logs_to_cloudwatch --db_engine aurora --db_identifier edx-aurora-cluster + +""" +from __future__ import absolute_import +from __future__ import print_function +import boto3 +import argparse + +def get_client(): + + rds_client = boto3.client('rds') + return rds_client + +def publish_rds_logs_to_cloudwatch(db_engine,db_identifier,logs_to_publish): + + client = get_client() + try: + if db_engine == "mysql": + response = client.modify_db_instance( + DBInstanceIdentifier=db_identifier, + CloudwatchLogsExportConfiguration={ + 'EnableLogTypes': [ + logs_to_publish + ] + } + ) + if response["ResponseMetadata"]["HTTPStatusCode"] == 200: + id=response["DBInstance"]["DBInstanceIdentifier"] + logs_exports_to_cloudwatch=response["DBInstance"]["EnabledCloudwatchLogsExports"] + print(("RDS MySQL DB {} logs {} are enabled to exports to cloudwatch" \ + .format(id,logs_exports_to_cloudwatch))) + elif db_engine == "aurora": + response = client.modify_db_cluster( + DBClusterIdentifier=db_identifier, + CloudwatchLogsExportConfiguration={ + 'EnableLogTypes':[ + logs_to_publish + ] + } + ) + if response["ResponseMetadata"]["HTTPStatusCode"] == 200: + id=response["DBCluster"]["DBClusterIdentifier"] + logs_exports_to_cloudwatch=response["DBCluster"]["EnabledCloudwatchLogsExports"] + print(("RDS Aurora Cluster {} logs {} are enabled to exports to cloudwatch" \ + .format(id,logs_exports_to_cloudwatch))) + else: + print("db_engine valid options are: mysql or aurora") + exit() + except Exception as e: + 
print(e) + +if __name__=="__main__": + + parser = argparse.ArgumentParser() + parser.add_argument('--db_engine', help='RDS engine: mysql or aurora',required=True) + parser.add_argument('--db_identifier', help='RDS instance ID',required=True) + parser.add_argument('--logs_to_publish',help='Logs to export to cloudwatch',default='error') + + args = parser.parse_args() + publish_rds_logs_to_cloudwatch(args.db_engine,args.db_identifier,args.logs_to_publish) diff --git a/util/rabbitmq/shovel.py b/util/rabbitmq/shovel.py index 88ede3768c6..dfddd99d7ba 100644 --- a/util/rabbitmq/shovel.py +++ b/util/rabbitmq/shovel.py @@ -1,9 +1,12 @@ #!/usr/bin/env python +from __future__ import absolute_import +from __future__ import print_function import argparse import subprocess import requests from requests.exceptions import HTTPError import sys +import six parser=argparse.ArgumentParser(description='Shovels between RabbitMQ Clusters') parser.add_argument('--src_host',action='store',dest='src_host') @@ -26,7 +29,7 @@ def list_vhosts(): response.raise_for_status() vhosts=[v['name'] for v in response.json() if v['name'].startswith('/')] except Exception as ex: - print "Failed to get vhosts: {}".format(ex) + print("Failed to get vhosts: {}".format(ex)) sys.exit(1) return vhosts @@ -38,7 +41,7 @@ def list_queues(): response.raise_for_status() queues=[q['name'] for q in response.json()] except Exception as ex: - print "Failed to get queues: {}".format(ex) + print("Failed to get queues: {}".format(ex)) sys.exit(1) return queues @@ -65,10 +68,10 @@ def create_shovel(shovel,arg): q=queue.split('.') if (q[0]!='celeryev' and q[-1]!='pidbox'): args='{{"src-uri": "{}", "src-queue": "{}","dest-uri": "{}","dest-queue": "{}"}}'.format(src_uri,queue,dest_uri,queue) - print "Running shovel for queue:{}".format(queue) + print("Running shovel for queue:{}".format(queue)) shovel_output=create_shovel(queue,args) if shovel_output is not None: - content=unicode(shovel_output,"utf-8") + 
content=six.text_type(shovel_output,"utf-8") output[queue]=content for k,v in output.items(): - print k,v + print(k,v) diff --git a/util/rds_sgs/rds_sgs.py b/util/rds_sgs/rds_sgs.py new file mode 100755 index 00000000000..0d718f14afa --- /dev/null +++ b/util/rds_sgs/rds_sgs.py @@ -0,0 +1,82 @@ +#!/usr/bin/python3 + +from __future__ import absolute_import +from __future__ import print_function +import boto3 +import click + +@click.command() +@click.argument('mode', type=click.Choice(['by_db', 'by_sg'])) +def command(mode): + """ + MODES: + + by_db: List rules for all RDS instances and which security group(s) they come from + + by_sg: shows each security group and which RDS instances are using it + """ + client = boto3.client('rds') + ec2_client = boto3.client('ec2') + dbs = client.describe_db_instances() + dbs_by_sg = {} + for db in dbs['DBInstances']: + open_ports = {} + sg_ids = [sg['VpcSecurityGroupId'] for sg in db['VpcSecurityGroups']] + for sg_id in sg_ids: + sg = ec2_client.describe_security_groups(GroupIds=[sg_id])['SecurityGroups'][0] + sg_id_and_name = "{} ({})".format(sg_id, sg['GroupName']) + if sg_id_and_name in dbs_by_sg: + dbs_by_sg[sg_id_and_name].append(db['DBInstanceIdentifier']) + else: + dbs_by_sg[sg_id_and_name] = [db['DBInstanceIdentifier']] + + if mode == 'by_db': + for permission in sg['IpPermissions']: + if permission['FromPort'] == permission['ToPort']: + ports = permission['FromPort'] + else: + ports = "{}-{}".format(permission['FromPort'],permission['ToPort']) + for IpRange in permission['IpRanges']: + key = IpRange['CidrIp'] + desc = sg['GroupName'] + if 'Description' in IpRange: + desc = "{}|{}".format(desc, IpRange['Description']) + + if ports in open_ports: + if key in open_ports[ports]: + open_ports[ports][key][sg_id] = desc + else: + open_ports[ports][key] = {sg_id: desc} + else: + open_ports[ports] = {key: {sg_id: desc}} + for UserIdGroupPair in permission['UserIdGroupPairs']: + source_sg_id = UserIdGroupPair['GroupId'] + key = "{} 
({})".format(source_sg_id, ec2_client.describe_security_groups(GroupIds=[source_sg_id])['SecurityGroups'][0]['GroupName']) + + desc = sg['GroupName'] + if 'Description' in UserIdGroupPair: + desc = "{}|{}".format(desc, UserIdGroupPair['Description']) + + if ports in open_ports: + if key in open_ports[ports]: + open_ports[ports][key][sg_id] = desc + else: + open_ports[ports][key] = {sg_id: desc} + else: + open_ports[ports] = {key: {sg_id: desc}} + + for ports,sources in open_ports.items(): + for source in sorted(sources.keys()): + sgs = [] + for sg_id in sorted(sources[source].keys()): + output = sg_id + if sources[source][sg_id]: + output = "{} ({})".format(output, sources[source][sg_id]) + sgs.append(output) + print(("{: <40} {: <11} {: <70} {}".format(db['DBInstanceIdentifier'], ports, source, ", ".join(sgs)))) + if mode == 'by_sg': + for sg,dbs in dbs_by_sg.items(): + print(("{: <70} {: <4} {}".format(sg, len(dbs), ", ".join(dbs)))) + +if __name__ == '__main__': + command() diff --git a/util/rds_sgs/requirements.txt b/util/rds_sgs/requirements.txt new file mode 100644 index 00000000000..3debd504d24 --- /dev/null +++ b/util/rds_sgs/requirements.txt @@ -0,0 +1,2 @@ +boto3==1.9.2 +click==6.7 diff --git a/util/s3_acl.py b/util/s3_acl.py new file mode 100644 index 00000000000..1a1cd9cccf2 --- /dev/null +++ b/util/s3_acl.py @@ -0,0 +1,193 @@ +#!/usr/bin/python3 +""" +Get current ACL of all objects in given S3 bucket or set them to private or revert back. +Script supports 3 operations +1- getacl +2- setaclprivate +3- revertacl + +1 optional parameter +whitelist (optional) (provide multiple whitelist parameters to filter out) + +It saves current ACL in a file named bucketname.txt for updating or reverting purposes. + +python s3_acl.py --bucketname --operation getacl --whitelist + +Should assume role to run this script. 
+""" + + +import boto3 +from botocore.exceptions import ClientError +import backoff +import sys +import json +import click +import logging + +MAX_TRIES = 5 +region = "us-east-1" +# Set logging configuration +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) +# create file handler that logs messages +filehandler = logging.FileHandler('s3_acl.log') +filehandler.setLevel(logging.INFO) +formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') +filehandler.setFormatter(formatter) +# add the handlers to logger +logger.addHandler(filehandler) + + +class S3BotoWrapper: + def __init__(self, **kwargs): + self.client = boto3.client("s3", **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def get_object(self, *args, **kwargs): + return self.client.list_objects_v2(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def get_acl(self, *args, **kwargs): + return self.client.get_object_acl(*args, **kwargs) + + @backoff.on_exception(backoff.expo, + ClientError, + max_tries=MAX_TRIES) + def put_acl(self, *args, **kwargs): + return self.client.put_object_acl(*args, **kwargs) + + +def get_all_s3_keys(s3_bucket, region, whitelist): + """Get a list of all keys in an S3 bucket.""" + keys = [] + kwargs = {'Bucket': s3_bucket} + while True: + s3_client = S3BotoWrapper(region_name=region) + resp = s3_client.get_object(**kwargs) + for obj in resp['Contents']: + # Filter out directories, you can add more filters here if required. 
+ if obj['Key'][-1] == '/' or any(obj['Key'].startswith(whitelist_object) for whitelist_object in whitelist): + continue + else: + keys.append(obj['Key']) + try: + kwargs['ContinuationToken'] = resp['NextContinuationToken'] + except KeyError: + break + return keys + + +def set_acl_private(acl_list, bucket_name, whitelist): + s3_client = S3BotoWrapper(region_name=region) + for item in acl_list: + for key, value in item.items(): + if any(key.startswith(whitelist_object) for whitelist_object in whitelist): + continue + else: + try: + s3_client.put_acl( + ACL='private', + Bucket=bucket_name, + Key=key, + ) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchKey': + logger.warning("No such key in S3: " + key) # Will send the errors to the file + else: + logger.error(("Unexpected error :{}".format(e))) + sys.exit(1) + + +def revert_s3_acl(acl_list, bucket_name, whitelist): + s3_client = S3BotoWrapper(region_name=region) + for item in acl_list: + for key, value in item.items(): + if any(key.startswith(whitelist_object) for whitelist_object in whitelist): + continue + else: + try: + value.pop('ResponseMetadata', None) + s3_client.put_acl( + AccessControlPolicy=value, + Bucket=bucket_name, + Key=key, + ) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchKey': + logger.warning("No such key in S3: " + key) # Will send the errors to the file + else: + logger.error(("Unexpected error :{}".format(e))) + sys.exit(1) + + +def get_s3_acl(s3_bucket, whitelist): + s3_client = S3BotoWrapper(region_name=region) + response_list = [] + try: + s3_objects_key = get_all_s3_keys(s3_bucket, region, whitelist) + except ClientError as e: + logger.error(("Unable to connect to AWS with error :{}".format(e))) + sys.exit(1) + for object_key in s3_objects_key: + try: + temp = {} + response = s3_client.get_acl(Bucket=s3_bucket, Key=object_key) + temp[object_key] = response + response_list.append(temp) + except ClientError as e: + if e.response['Error']['Code'] == 
'AccessDenied': + logger.warning("You Don't have permission to access this object: " + object_key) + elif e.response['Error']['Code'] == 'NoSuchKey': + logger.warning("No such key in S3: " + object_key) # Will send the errors to the file + else: + logger.error(("Unexpected error :{}".format(e))) + sys.exit(1) + return response_list + + +@click.command() +@click.option('--bucketname', required=True, help='S3 bucket name') +@click.option('--operation', required=True, help='Operation name to perform i.e 1- getacl 2- setaclprivate 3- revertacl') +@click.option('--whitelist', '-i', multiple=True, help='S3 objects name to avoid') +def controller(bucketname, operation, whitelist): + file_to_write = bucketname + ".txt" + if operation == 'getacl': + objects_acl = get_s3_acl(bucketname, whitelist) + with open(file_to_write, 'w') as fout: + json.dump(objects_acl, fout) + logger.info("Task completed. Total numbers of objects read are: " + str(len(objects_acl))) + elif operation == 'setaclprivate': + try: + data = [] + with open(file_to_write, "r") as inFile: + data = json.load(inFile) + set_acl_private(data, bucketname, whitelist) + logger.info("Task completed. ACL of " + bucketname + " objects set to private.") + except IOError: + logger.error("File not accessible") + sys.exit(1) + elif operation == 'revertacl': + try: + data = [] + with open(file_to_write, "r") as inFile: + data = json.load(inFile) + revert_s3_acl(data, bucketname, whitelist) + logger.info("Task completed. ACL of " + bucketname + " objects reverted to given state") + except IOError: + logger.error("File not accessible") + sys.exit(1) + else: + logger.error("Invalid Operation. Please enter valid operation. 
Operation supported are i.e 1- getacl " + "2- setaclprivate 3- revertacl ") # Will send the errors to the file + sys.exit(0) + + +if __name__ == '__main__': + controller() + diff --git a/util/vpc-tools/abbey.py b/util/vpc-tools/abbey.py deleted file mode 100644 index ad8923cba54..00000000000 --- a/util/vpc-tools/abbey.py +++ /dev/null @@ -1,952 +0,0 @@ -#!/usr/bin/env python -u -import sys -from argparse import ArgumentParser -import time -import json -import yaml -import os -import requests -try: - import boto.ec2 - import boto.sqs - import boto.vpc - from boto.exception import NoAuthHandlerFound, EC2ResponseError - from boto.sqs.message import RawMessage - from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping -except ImportError: - print "boto required for script" - sys.exit(1) - -from pprint import pprint - -AMI_TIMEOUT = 2700 # time to wait for AMIs to complete(45 minutes) -EC2_RUN_TIMEOUT = 180 # time to wait for ec2 state transition -EC2_STATUS_TIMEOUT = 300 # time to wait for ec2 system status checks -NUM_TASKS = 5 # number of tasks for time summary report -NUM_PLAYBOOKS = 2 - - -class Unbuffered: - """ - For unbuffered output, not - needed if PYTHONUNBUFFERED is set - """ - def __init__(self, stream): - self.stream = stream - - def write(self, data): - self.stream.write(data) - self.stream.flush() - - def __getattr__(self, attr): - return getattr(self.stream, attr) - -sys.stdout = Unbuffered(sys.stdout) - - -def parse_args(): - parser = ArgumentParser() - parser.add_argument('--noop', action='store_true', - help="don't actually run the cmds", - default=False) - parser.add_argument('--secure-vars-file', required=False, - metavar="SECURE_VAR_FILE", default=None, - help="path to secure-vars from the root of " - "the secure repo. By default .yml and " - "-.yml will be used if they " - "exist in /ansible/vars/. 
This secure file " - "will be used in addition to these if they exist.") - parser.add_argument('--stack-name', - help="defaults to ENVIRONMENT-DEPLOYMENT", - metavar="STACK_NAME", - required=False) - parser.add_argument('-p', '--play', - help='play name without the yml extension', - metavar="PLAY", required=True) - parser.add_argument('--playbook-dir', - help='directory to find playbooks in', - default='configuration/playbooks', - metavar="PLAYBOOKDIR", required=False) - parser.add_argument('-d', '--deployment', metavar="DEPLOYMENT", - required=True) - parser.add_argument('-e', '--environment', metavar="ENVIRONMENT", - required=True) - parser.add_argument('-v', '--verbose', action='store_true', - help="turn on verbosity") - parser.add_argument('--no-cleanup', action='store_true', - help="don't cleanup on failures") - parser.add_argument('--vars', metavar="EXTRA_VAR_FILE", - help="path to extra var file", required=False) - parser.add_argument('--configuration-version', required=False, - help="configuration repo gitref", - default="master") - parser.add_argument('--configuration-secure-version', required=False, - help="configuration-secure repo gitref", - default="master") - parser.add_argument('--configuration-secure-repo', required=False, - default="git@github.com:edx-ops/prod-secure", - help="repo to use for the secure files") - parser.add_argument('--configuration-internal-version', required=False, - help="configuration-internal repo gitref", - default="master") - parser.add_argument('--configuration-internal-repo', required=False, - default="", - help="repo to use for internal (non-secure) configuration data") - parser.add_argument('--configuration-private-version', required=False, - help="configuration-private repo gitref", - default="master") - parser.add_argument('--configuration-private-repo', required=False, - default="", - help="repo to use for private playbooks") - parser.add_argument('-c', '--cache-id', required=True, - help="unique id to use as part of 
cache prefix") - parser.add_argument('-i', '--identity', required=False, - help="path to identity file for pulling " - "down configuration-secure", - default=None) - parser.add_argument('-r', '--region', required=False, - default="us-east-1", - help="aws region") - parser.add_argument('-k', '--keypair', required=False, - default="deployment", - help="AWS keypair to use for instance") - parser.add_argument('-t', '--instance-type', required=False, - default="m1.large", - help="instance type to launch") - parser.add_argument("--role-name", required=False, - default="abbey", - help="IAM role name to use (must exist)") - parser.add_argument("--msg-delay", required=False, - default=5, - help="How long to delay message display from sqs " - "to ensure ordering") - parser.add_argument("--hipchat-room-id", required=False, - default=None, - help="The API ID of the Hipchat room to post" - "status messages to") - parser.add_argument("--ansible-hipchat-room-id", required=False, - default='Hammer', - help="The room used by the abbey instance for " - "printing verbose ansible run data.") - parser.add_argument("--hipchat-api-token", required=False, - default=None, - help="The API token for Hipchat integration") - parser.add_argument("--callback-url", required=False, - default=None, - help="The callback URL to send notifications to") - parser.add_argument("--root-vol-size", required=False, - default=50, - help="The size of the root volume to use for the " - "abbey instance.") - parser.add_argument("--datadog-api-key", required=False, - default="", - help="The datadog api key used for capturing task" - "and playbook metrics abbey instance.") - - group = parser.add_mutually_exclusive_group() - group.add_argument('-b', '--base-ami', required=False, - help="ami to use as a base ami", - default="ami-cd0f5cb6") - group.add_argument('--blessed', action='store_true', - help="Look up blessed ami for env-dep-play.", - default=False) - - return parser.parse_args() - -def 
get_instance_sec_group(vpc_id): - - grp_details = ec2.get_all_security_groups( - filters={ - 'vpc_id': vpc_id, - 'tag:play': args.play - } - ) - - if len(grp_details) < 1: - # - # try scheme for non-cloudformation builds - # - - grp_details = ec2.get_all_security_groups( - filters={ - 'tag:play': args.play, - 'tag:environment': args.environment, - 'tag:deployment': args.deployment} - ) - - if len(grp_details) < 1: - sys.stderr.write("ERROR: Expected atleast one security group, got {}\n".format( - len(grp_details))) - - return grp_details[0].id - - -def get_blessed_ami(): - images = ec2.get_all_images( - filters={ - 'tag:environment': args.environment, - 'tag:deployment': args.deployment, - 'tag:play': args.play, - 'tag:blessed': True - } - ) - - if len(images) != 1: - raise Exception("ERROR: Expected only one blessed ami, got {}\n".format( - len(images))) - - return images[0].id - - -def create_instance_args(): - """ - Looks up security group, subnet - and returns arguments to pass into - ec2.run_instances() including - user data - """ - - vpc = boto.vpc.connect_to_region(args.region) - subnet = vpc.get_all_subnets( - filters={ - 'tag:aws:cloudformation:stack-name': stack_name, - 'tag:play': args.play} - ) - - if len(subnet) < 1: - # - # try scheme for non-cloudformation builds - # - - subnet = vpc.get_all_subnets( - filters={ - 'tag:play': args.play, - 'tag:environment': args.environment, - 'tag:deployment': args.deployment} - ) - - if len(subnet) < 1: - sys.stderr.write("ERROR: Expected at least one subnet, got {} for {}-{}-{}\n".format( - len(subnet), args.environment, args.deployment, args.play)) - sys.exit(1) - subnet_id = subnet[0].id - vpc_id = subnet[0].vpc_id - - security_group_id = get_instance_sec_group(vpc_id) - - if args.identity: - config_secure = 'true' - with open(args.identity) as f: - identity_contents = f.read() - else: - config_secure = 'false' - identity_contents = "dummy" - - user_data = """#!/bin/bash -set -x -set -e -exec > >(tee 
/var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 -base_dir="/var/tmp/edx-cfg" -extra_vars="$base_dir/extra-vars-$$.yml" -secure_identity="$base_dir/secure-identity" -git_ssh="$base_dir/git_ssh.sh" -configuration_version="{configuration_version}" -configuration_secure_version="{configuration_secure_version}" -configuration_private_version="{configuration_private_version}" -configuration_internal_version="{configuration_internal_version}" -environment="{environment}" -deployment="{deployment}" -play="{play}" -cluster="{play}" -config_secure={config_secure} -git_repo_name="configuration" -git_repo="https://github.com/edx/$git_repo_name" -git_repo_secure="{configuration_secure_repo}" -git_repo_secure_name=$(basename $git_repo_secure .git) -git_repo_private="{configuration_private_repo}" -git_repo_private_name=$(basename $git_repo_private .git) -git_repo_internal="{configuration_internal_repo}" -git_repo_internal_name=$(basename $git_repo_internal .git) -secure_vars_file={secure_vars_file} -environment_deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{environment}-{deployment}.yml" -deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{deployment}.yml" -environment_deployment_internal_vars="$base_dir/$git_repo_internal_name/ansible/vars/{environment}-{deployment}.yml" -deployment_internal_vars="$base_dir/$git_repo_internal_name/ansible/vars/{deployment}.yml" -instance_id=\\ -$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null) -instance_ip=\\ -$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null) -instance_type=\\ -$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null) -playbook_dir="$base_dir/{playbook_dir}" - -if $config_secure; then - git_cmd="env GIT_SSH=$git_ssh git" -else - git_cmd="git" -fi - -ANSIBLE_ENABLE_SQS=true -SQS_NAME={queue_name} -SQS_REGION={region} -SQS_MSG_PREFIX="[ $instance_id $instance_ip $environment-$deployment $play ]" 
-PYTHONUNBUFFERED=1 -HIPCHAT_TOKEN={hipchat_token} -HIPCHAT_ROOM={hipchat_room} -HIPCHAT_MSG_PREFIX="$environment-$deployment-$play: " -HIPCHAT_FROM="ansible-$instance_id" -HIPCHAT_MSG_COLOR=$(echo -e "yellow\\ngreen\\npurple\\ngray" | shuf | head -1) -DATADOG_API_KEY={datadog_api_key} -# environment for ansible -export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED -export HIPCHAT_TOKEN HIPCHAT_ROOM HIPCHAT_MSG_PREFIX HIPCHAT_FROM -export HIPCHAT_MSG_COLOR DATADOG_API_KEY - - -#################################### Lifted from ansible-bootstrap.sh -if [[ -z "$ANSIBLE_REPO" ]]; then - ANSIBLE_REPO="https://github.com/edx/ansible.git" -fi - -if [[ -z "$ANSIBLE_VERSION" ]]; then - ANSIBLE_VERSION="master" -fi - -if [[ -z "$CONFIGURATION_REPO" ]]; then - CONFIGURATION_REPO="https://github.com/edx/configuration.git" -fi - -if [[ -z "$CONFIGURATION_VERSION" ]]; then - CONFIGURATION_VERSION="master" -fi - -if [[ -z "$UPGRADE_OS" ]]; then - UPGRADE_OS=false -fi - -# -# Bootstrapping constants -# -VIRTUAL_ENV_VERSION="15.2.0" -PIP_VERSION="9.0.3" -SETUPTOOLS_VERSION="39.0.1" -EDX_PPA_KEY_SERVER="keyserver.ubuntu.com" -EDX_PPA_KEY_ID="B41E5E3969464050" - -cat << EOF -****************************************************************************** - -Running the abbey with the following arguments: - -ANSIBLE_REPO="$ANSIBLE_REPO" -ANSIBLE_VERSION="$ANSIBLE_VERSION" -CONFIGURATION_REPO="$CONFIGURATION_REPO" -CONFIGURATION_VERSION="$CONFIGURATION_VERSION" - -****************************************************************************** -EOF - - -if [[ $(id -u) -ne 0 ]] ;then - echo "Please run as root"; - exit 1; -fi - -if grep -q 'Trusty Tahr' /etc/os-release -then - SHORT_DIST="trusty" -elif grep -q 'Xenial Xerus' /etc/os-release -then - SHORT_DIST="xenial" -else - cat << EOF - - This script is only known to work on Ubuntu Trusty and Xenial, - exiting. If you are interested in helping make installation possible - on other platforms, let us know. 
- -EOF - exit 1; -fi - -EDX_PPA="deb http://ppa.edx.org $SHORT_DIST main" - -# Upgrade the OS -apt-get update -y -apt-key update -y - -if [ "$UPGRADE_OS" = true ]; then - echo "Upgrading the OS..." - apt-get upgrade -y -fi - -# Required for add-apt-repository -apt-get install -y software-properties-common python-software-properties - -# Add git PPA -add-apt-repository -y ppa:git-core/ppa - -# For older distributions we need to install a PPA for Python 2.7.10 -if [[ "trusty" = "$SHORT_DIST" ]]; then - - # Add python PPA - apt-key adv --keyserver "$EDX_PPA_KEY_SERVER" --recv-keys "$EDX_PPA_KEY_ID" - add-apt-repository -y "$EDX_PPA" -fi - -# Install python 2.7 latest, git and other common requirements -# NOTE: This will install the latest version of python 2.7 and -# which may differ from what is pinned in virtualenvironments -apt-get update -y - -apt-get install -y python2.7 python2.7-dev python-pip python-apt python-yaml python-jinja2 build-essential sudo git-core libmysqlclient-dev libffi-dev libssl-dev -pip install --upgrade pip=="$PIP_VERSION" - -# pip moves to /usr/local/bin when upgraded -hash -r #pip may have moved from /usr/bin/ to /usr/local/bin/. This clears bash's path cache. -PATH=/usr/local/bin:$PATH -pip install setuptools=="$SETUPTOOLS_VERSION" -pip install virtualenv=="$VIRTUAL_ENV_VERSION" - - -##################### END Lifted from ansible-bootstrap.sh - - -# python3 is required for certain other things -# (currently xqwatcher so it can run python2 and 3 grader code, -# but potentially more in the future). 
-/usr/bin/apt-get install -y python3-pip python3-dev - -# this is missing on 14.04 (base package on 12.04) -# we need to do this on any build, since the above apt-get -# only runs on a build from scratch -/usr/bin/apt-get install -y python-httplib2 --force-yes - -rm -rf $base_dir -mkdir -p $base_dir -cd $base_dir - -cat << EOF > $git_ssh -#!/bin/sh -exec /usr/bin/ssh -o StrictHostKeyChecking=no -i "$secure_identity" "\$@" -EOF - -chmod 755 $git_ssh - -if $config_secure; then - cat << EOF > $secure_identity -{identity_contents} -EOF -fi - -cat << EOF >> $extra_vars ---- -# extra vars passed into -# abbey.py including versions -# of all the repositories -{extra_vars_yml} - -# abbey will always run fake migrations -# this is so that the application can come -# up healthy -fake_migrations: true - -disable_edx_services: true -COMMON_TAG_EC2_INSTANCE: true - -# abbey should never take instances in -# and out of elbs -elb_pre_post: false -EOF - -chmod 400 $secure_identity - -$git_cmd clone $git_repo $git_repo_name -cd $git_repo_name -$git_cmd checkout $configuration_version -cd $base_dir - -if $config_secure; then - $git_cmd clone $git_repo_secure $git_repo_secure_name - cd $git_repo_secure_name - $git_cmd checkout $configuration_secure_version - cd $base_dir -fi - -if [[ ! -z $git_repo_private ]]; then - $git_cmd clone $git_repo_private $git_repo_private_name - cd $git_repo_private_name - $git_cmd checkout $configuration_private_version - cd $base_dir -fi - -if [[ ! 
-z $git_repo_internal ]]; then - $git_cmd clone $git_repo_internal $git_repo_internal_name - cd $git_repo_internal_name - $git_cmd checkout $configuration_internal_version - cd $base_dir -fi - - -cd $base_dir/$git_repo_name -sudo pip install -r pre-requirements.txt -sudo pip install -r requirements.txt - -cd $playbook_dir - -if [[ -r "$deployment_internal_vars" ]]; then - extra_args_opts+=" -e@$deployment_internal_vars" -fi - -if [[ -r "$environment_deployment_internal_vars" ]]; then - extra_args_opts+=" -e@$environment_deployment_internal_vars" -fi - -if [[ -r "$deployment_secure_vars" ]]; then - extra_args_opts+=" -e@$deployment_secure_vars" -fi - -if [[ -r "$environment_deployment_secure_vars" ]]; then - extra_args_opts+=" -e@$environment_deployment_secure_vars" -fi - -if $secure_vars_file; then - extra_args_opts+=" -e@$secure_vars_file" -fi - -extra_args_opts+=" -e@$extra_vars" - -ansible-playbook -vvvv -c local -i "localhost," --skip-tags="install:devstack,migrate:devstack" $play.yml $extra_args_opts -ansible-playbook -vvvv -c local -i "localhost," --skip-tags="install:devstack,migrate:devstack" stop_all_edx_services.yml $extra_args_opts - -rm -rf $base_dir - - """.format( - hipchat_token=args.hipchat_api_token, - hipchat_room=args.ansible_hipchat_room_id, - configuration_version=args.configuration_version, - configuration_secure_version=args.configuration_secure_version, - configuration_secure_repo=args.configuration_secure_repo, - configuration_private_version=args.configuration_private_version, - configuration_private_repo=args.configuration_private_repo, - configuration_internal_version=args.configuration_internal_version, - configuration_internal_repo=args.configuration_internal_repo, - environment=args.environment, - deployment=args.deployment, - play=args.play, - playbook_dir=args.playbook_dir, - config_secure=config_secure, - identity_contents=identity_contents, - queue_name=run_id, - extra_vars_yml=extra_vars_yml, - secure_vars_file=secure_vars_file, 
- cache_id=args.cache_id, - datadog_api_key=args.datadog_api_key, - region=args.region) - - mapping = BlockDeviceMapping() - root_vol = BlockDeviceType(size=args.root_vol_size, - volume_type='gp2') - mapping['/dev/sda1'] = root_vol - - ec2_args = { - 'security_group_ids': [security_group_id], - 'subnet_id': subnet_id, - 'key_name': args.keypair, - 'image_id': base_ami, - 'instance_type': args.instance_type, - 'instance_profile_name': args.role_name, - 'user_data': user_data, - 'block_device_map': mapping, - } - - return ec2_args - - -def poll_sqs_ansible(): - """ - Prints events to the console and - blocks until a final STATS ansible - event is read off of SQS. - - SQS does not guarantee FIFO, for that - reason there is a buffer that will delay - messages before they are printed to the - console. - - Returns length of the ansible run. - """ - oldest_msg_ts = 0 - buf = [] - task_report = [] # list of tasks for reporting - last_task = None - completed = 0 - while True: - messages = [] - while True: - # get all available messages on the queue - msgs = sqs_queue.get_messages(attributes='All') - if not msgs: - break - messages.extend(msgs) - - for message in messages: - recv_ts = float( - message.attributes['ApproximateFirstReceiveTimestamp']) * .001 - sent_ts = float(message.attributes['SentTimestamp']) * .001 - try: - msg_info = { - 'msg': json.loads(message.get_body()), - 'sent_ts': sent_ts, - 'recv_ts': recv_ts, - } - buf.append(msg_info) - except ValueError as e: - print "!!! ERROR !!! 
unable to parse queue message, " \ - "expecting valid json: {} : {}".format( - message.get_body(), e) - if not oldest_msg_ts or recv_ts < oldest_msg_ts: - oldest_msg_ts = recv_ts - sqs_queue.delete_message(message) - - now = int(time.time()) - if buf: - try: - if (now - min([msg['recv_ts'] for msg in buf])) > args.msg_delay: - # sort by TS instead of recv_ts - # because the sqs timestamp is not as - # accurate - buf.sort(key=lambda k: k['msg']['TS']) - to_disp = buf.pop(0) - if 'START' in to_disp['msg']: - print '\n{:0>2.0f}:{:0>5.2f} {} : Starting "{}"'.format( - to_disp['msg']['TS'] / 60, - to_disp['msg']['TS'] % 60, - to_disp['msg']['PREFIX'], - to_disp['msg']['START']), - - elif 'TASK' in to_disp['msg']: - print "\n{:0>2.0f}:{:0>5.2f} {} : {}".format( - to_disp['msg']['TS'] / 60, - to_disp['msg']['TS'] % 60, - to_disp['msg']['PREFIX'], - to_disp['msg']['TASK']), - last_task = to_disp['msg']['TASK'] - elif 'OK' in to_disp['msg']: - if args.verbose: - print "\n" - for key, value in to_disp['msg']['OK'].iteritems(): - print " {:<15}{}".format(key, value) - else: - invocation = to_disp['msg']['OK']['invocation'] - module = invocation['module_name'] - # 'set_fact' does not provide a changed value. - if module == 'set_fact': - changed = "OK" - elif to_disp['msg']['OK']['changed']: - changed = "*OK*" - else: - changed = "OK" - print " {}".format(changed), - task_report.append({ - 'TASK': last_task, - 'INVOCATION': to_disp['msg']['OK']['invocation'], - 'DELTA': to_disp['msg']['delta'], - }) - elif 'FAILURE' in to_disp['msg']: - print " !!!! FAILURE !!!!", - for key, value in to_disp['msg']['FAILURE'].iteritems(): - print " {:<15}{}".format(key, value) - raise Exception("Failed Ansible run") - elif 'STATS' in to_disp['msg']: - print "\n{:0>2.0f}:{:0>5.2f} {} : COMPLETE".format( - to_disp['msg']['TS'] / 60, - to_disp['msg']['TS'] % 60, - to_disp['msg']['PREFIX']) - - # Since 3 ansible plays get run. 
- # We see the COMPLETE message 3 times - # wait till the last one to end listening - # for new messages. - completed += 1 - if completed >= NUM_PLAYBOOKS: - return (to_disp['msg']['TS'], task_report) - except KeyError: - print "Failed to print status from message: {}".format(to_disp) - - if not messages: - # wait 1 second between sqs polls - time.sleep(1) - - -def create_ami(instance_id, name, description): - - params = {'instance_id': instance_id, - 'name': name, - 'description': description, - 'no_reboot': False} - - AWS_API_WAIT_TIME = 1 - image_id = ec2.create_image(**params) - print("Checking if image is ready.") - for _ in xrange(AMI_TIMEOUT): - try: - img = ec2.get_image(image_id) - if img.state == 'available': - print("Tagging image.") - img.add_tag("environment", args.environment) - time.sleep(AWS_API_WAIT_TIME) - img.add_tag("deployment", args.deployment) - time.sleep(AWS_API_WAIT_TIME) - img.add_tag("cluster", args.play) - time.sleep(AWS_API_WAIT_TIME) - img.add_tag("play", args.play) - time.sleep(AWS_API_WAIT_TIME) - conf_tag = "{} {}".format("http://github.com/edx/configuration", args.configuration_version) - img.add_tag("version:configuration", conf_tag) - time.sleep(AWS_API_WAIT_TIME) - conf_secure_tag = "{} {}".format(args.configuration_secure_repo, args.configuration_secure_version) - img.add_tag("version:configuration_secure", conf_secure_tag) - time.sleep(AWS_API_WAIT_TIME) - conf_internal_tag = "{} {}".format(args.configuration_internal_repo, args.configuration_internal_version) - img.add_tag("version:configuration_internal", conf_internal_tag) - time.sleep(AWS_API_WAIT_TIME) - img.add_tag("cache_id", args.cache_id) - time.sleep(AWS_API_WAIT_TIME) - - # Get versions from the instance. 
- tags = ec2.get_all_tags(filters={'resource-id': instance_id}) - for tag in tags: - if tag.name.startswith('version:'): - img.add_tag(tag.name, tag.value) - time.sleep(AWS_API_WAIT_TIME) - break - else: - time.sleep(1) - except EC2ResponseError as e: - if e.error_code == 'InvalidAMIID.NotFound': - time.sleep(1) - else: - raise Exception("Unexpected error code: {}".format( - e.error_code)) - time.sleep(1) - else: - raise Exception("Timeout waiting for AMI to finish") - - return image_id - - -def launch_and_configure(ec2_args): - """ - Creates an sqs queue, launches an ec2 instance, - configures it and creates an AMI. Polls - SQS for updates - """ - - print "{:<40}".format( - "Creating SQS queue and launching instance for {}:".format(run_id)) - print - for k, v in ec2_args.iteritems(): - if k != 'user_data': - print " {:<25}{}".format(k, v) - print - - global sqs_queue - global instance_id - sqs_queue = sqs.create_queue(run_id) - sqs_queue.set_message_class(RawMessage) - res = ec2.run_instances(**ec2_args) - inst = res.instances[0] - instance_id = inst.id - - print "{:<40}".format( - "Waiting for instance {} to reach running status:".format(instance_id)), - status_start = time.time() - for _ in xrange(EC2_RUN_TIMEOUT): - try: - res = ec2.get_all_instances(instance_ids=[instance_id]) - except EC2ResponseError as e: - if e.code == "InvalidInstanceID.NotFound": - print("Instance not found({}), will try again.".format( - instance_id)) - time.sleep(1) - continue - else: - raise(e) - if res[0].instances[0].state == 'running': - status_delta = time.time() - status_start - run_summary.append(('EC2 Launch', status_delta)) - print "[ OK ] {:0>2.0f}:{:0>2.0f}".format( - status_delta / 60, - status_delta % 60) - break - else: - time.sleep(1) - else: - raise Exception("Timeout waiting for running status: {} ".format( - instance_id)) - - print "{:<40}".format("Waiting for system status:"), - system_start = time.time() - for _ in xrange(EC2_STATUS_TIMEOUT): - status = 
ec2.get_all_instance_status(inst.id) - if status and status[0].system_status.status == u'ok': - system_delta = time.time() - system_start - run_summary.append(('EC2 Status Checks', system_delta)) - print "[ OK ] {:0>2.0f}:{:0>2.0f}".format( - system_delta / 60, - system_delta % 60) - break - else: - time.sleep(1) - else: - raise Exception("Timeout waiting for status checks: {} ".format( - instance_id)) - - print - print "{:<40}".format( - "Waiting for user-data, polling sqs for Ansible events:") - - (ansible_delta, task_report) = poll_sqs_ansible() - run_summary.append(('Ansible run', ansible_delta)) - print - print "{} longest Ansible tasks (seconds):".format(NUM_TASKS) - for task in sorted( - task_report, reverse=True, - key=lambda k: k['DELTA'])[:NUM_TASKS]: - print "{:0>3.0f} {}".format(task['DELTA'], task['TASK']) - print " - {}".format(task['INVOCATION']) - print - - print "{:<40}".format("Creating AMI:"), - ami_start = time.time() - ami = create_ami(instance_id, run_id, run_id) - ami_delta = time.time() - ami_start - print "[ OK ] {:0>2.0f}:{:0>2.0f}".format( - ami_delta / 60, - ami_delta % 60) - run_summary.append(('AMI Build', ami_delta)) - total_time = time.time() - start_time - all_stages = sum(run[1] for run in run_summary) - if total_time - all_stages > 0: - run_summary.append(('Other', total_time - all_stages)) - run_summary.append(('Total', total_time)) - - return run_summary, ami - - -def send_hipchat_message(message): - print(message) - if args.callback_url: - r=requests.get("{}/{}".format(args.callback_url, message)) - else: - #If hipchat is configured send the details to the specified room - if args.hipchat_api_token and args.hipchat_room_id: - import hipchat - try: - hipchat = hipchat.HipChat(token=args.hipchat_api_token) - hipchat.message_room(args.hipchat_room_id, 'AbbeyNormal', - message) - except Exception as e: - print("Hipchat messaging resulted in an error: %s." 
% e) - -if __name__ == '__main__': - - args = parse_args() - - run_summary = [] - - start_time = time.time() - - if args.vars: - with open(args.vars) as f: - extra_vars_yml = f.read() - extra_vars = yaml.load(extra_vars_yml) - else: - extra_vars_yml = "" - extra_vars = {} - - if args.secure_vars_file: - # explicit path to a single - # secure var file - secure_vars_file = args.secure_vars_file - else: - secure_vars_file = 'false' - - if args.stack_name: - stack_name = args.stack_name - else: - stack_name = "{}-{}".format(args.environment, args.deployment) - - try: - ec2 = boto.ec2.connect_to_region(args.region) - except NoAuthHandlerFound: - print 'Unable to connect to ec2 in region :{}'.format(args.region) - sys.exit(1) - - try: - sqs = boto.sqs.connect_to_region(args.region) - except NoAuthHandlerFound: - print 'Unable to connect to sqs in region :{}'.format(args.region) - sys.exit(1) - - if args.blessed: - base_ami = get_blessed_ami() - else: - base_ami = args.base_ami - - error_in_abbey_run = False - try: - sqs_queue = None - instance_id = None - - run_id = "{}-abbey-{}-{}-{}".format( - int(time.time() * 100), args.environment, args.deployment, args.play) - - ec2_args = create_instance_args() - - if args.noop: - print "Would have created sqs_queue with id: {}\nec2_args:".format( - run_id) - pprint(ec2_args) - ami = "ami-00000" - else: - run_summary, ami = launch_and_configure(ec2_args) - print - print "Summary:\n" - - for run in run_summary: - print "{:<30} {:0>2.0f}:{:0>5.2f}".format( - run[0], run[1] / 60, run[1] % 60) - print "AMI: {}".format(ami) - - message = 'Finished baking AMI {image_id} for {environment} {deployment} {play}.'.format( - image_id=ami, - environment=args.environment, - deployment=args.deployment, - play=args.play) - - send_hipchat_message(message) - except Exception as e: - message = 'An error occurred building AMI for {environment} ' \ - '{deployment} {play}. 
The Exception was {exception}'.format( - environment=args.environment, - deployment=args.deployment, - play=args.play, - exception=repr(e)) - send_hipchat_message(message) - error_in_abbey_run = True - finally: - print - if not args.no_cleanup and not args.noop: - if sqs_queue: - print "Cleaning up - Removing SQS queue - {}".format(run_id) - sqs.delete_queue(sqs_queue) - if instance_id: - print "Cleaning up - Terminating instance ID - {}".format( - instance_id) - # Check to make sure we have an instance id. - if instance_id: - ec2.terminate_instances(instance_ids=[instance_id]) - if error_in_abbey_run: - exit(1) diff --git a/util/vpc-tools/asg_lifcycle_watcher.py b/util/vpc-tools/asg_lifcycle_watcher.py index 5475e8521dc..15f1ab8d512 100644 --- a/util/vpc-tools/asg_lifcycle_watcher.py +++ b/util/vpc-tools/asg_lifcycle_watcher.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import __author__ = 'e0d' """ diff --git a/util/vpc-tools/requirements.txt b/util/vpc-tools/requirements.txt index 4a68d409de6..b97ee5d943e 100644 --- a/util/vpc-tools/requirements.txt +++ b/util/vpc-tools/requirements.txt @@ -4,11 +4,10 @@ # # make upgrade # -boto==2.48.0 -certifi==2018.4.16 # via requests +boto==2.49.0 # via -r requirements/vpc-tools.in +certifi==2020.4.5.1 # via requests chardet==3.0.4 # via requests -docopt==0.6.2 -idna==2.6 # via requests -python-simple-hipchat==0.2 -requests==2.18.4 -urllib3==1.22 # via requests +docopt==0.6.2 # via -r requirements/vpc-tools.in +idna==2.9 # via requests +requests==2.23.0 # via -r requirements/vpc-tools.in +urllib3==1.25.9 # via requests diff --git a/util/vpc-tools/tag-old-ebs.py b/util/vpc-tools/tag-old-ebs.py index 4a4112b4be3..90fdae41335 100644 --- a/util/vpc-tools/tag-old-ebs.py +++ b/util/vpc-tools/tag-old-ebs.py @@ -3,6 +3,7 @@ For a given aws account, go through all un-attached volumes and tag them. 
""" +from __future__ import absolute_import import boto import boto.utils import argparse @@ -14,6 +15,7 @@ import yaml # needs to be pip installed import netaddr +from six.moves import filter LOG_FORMAT = "%(asctime)s %(levelname)s - %(filename)s:%(lineno)s - %(message)s" TIMEOUT = 300 @@ -50,13 +52,13 @@ def potential_devices(root_device): relevant_devices = lambda x: x.startswith(basename(root_device)) all_devices = os.listdir(device_dir) - all_devices = filter(relevant_devices, all_devices) + all_devices = list(filter(relevant_devices, all_devices)) logging.info("Potential devices on {}: {}".format(root_device, all_devices)) if len(all_devices) > 1: all_devices.remove(basename(root_device)) - return map(lambda x: join(device_dir, x), all_devices) + return [join(device_dir, x) for x in all_devices] def get_tags_for_disk(mountpoint): tag_data = {} diff --git a/util/vpc-tools/vpc_dns.py b/util/vpc-tools/vpc_dns.py index d8e051a75ac..3f34b7793e9 100644 --- a/util/vpc-tools/vpc_dns.py +++ b/util/vpc-tools/vpc_dns.py @@ -22,6 +22,8 @@ # python vpc_dns.py -s stage-stack -z vpc.example.com # +from __future__ import absolute_import +from __future__ import print_function import argparse import boto import datetime @@ -78,13 +80,13 @@ def add_or_update_record(dns_records): """.format(record.record_name, record.record_type, record.record_ttl, record.record_values) if args.noop: - print("Would have updated DNS record:\n{}".format(status_msg)) + print(("Would have updated DNS record:\n{}".format(status_msg))) else: - print("Updating DNS record:\n{}".format(status_msg)) + print(("Updating DNS record:\n{}".format(status_msg))) if record.record_name in record_names: - print("Unable to create record for {} with value {} because one already exists!".format( - record.record_values, record.record_name)) + print(("Unable to create record for {} with value {} because one already exists!".format( + record.record_values, record.record_name))) sys.exit(1) 
record_names.add(record.record_name) @@ -97,15 +99,15 @@ def add_or_update_record(dns_records): # If the record name already points to something. # Delete the existing connection. If the record has # the same type and name skip it. - if record.record_name in old_records.keys(): + if record.record_name in list(old_records.keys()): if record.record_name + "." == old_records[record.record_name].name and \ record.record_type == old_records[record.record_name].type: - print("Record for {} already exists and is identical, skipping.\n".format( - record.record_name)) + print(("Record for {} already exists and is identical, skipping.\n".format( + record.record_name))) continue if args.force: - print("Deleting record:\n{}".format(status_msg)) + print(("Deleting record:\n{}".format(status_msg))) change = change_set.add_change( 'DELETE', record.record_name, @@ -133,7 +135,7 @@ def add_or_update_record(dns_records): else: print("Submitting the following change set:\n") xml_doc = xml.dom.minidom.parseString(change_set.to_xml()) - print(xml_doc.toprettyxml(newl='')) # newl='' to remove extra newlines + print((xml_doc.toprettyxml(newl=''))) # newl='' to remove extra newlines if not args.noop: r53.change_rrsets(zone_id, change_set.to_xml()) @@ -152,21 +154,21 @@ def get_or_create_hosted_zone(zone_name): if args.noop: if parent_zone: - print("Would have created/updated zone: {} parent: {}".format( - zone_name, parent_zone_name)) + print(("Would have created/updated zone: {} parent: {}".format( + zone_name, parent_zone_name))) else: - print("Would have created/updated zone: {}".format( - zone_name, parent_zone_name)) + print(("Would have created/updated zone: {}".format( + zone_name, parent_zone_name))) return zone if not zone: - print("zone {} does not exist, creating".format(zone_name)) + print(("zone {} does not exist, creating".format(zone_name))) ts = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%SUTC') zone = r53.create_hosted_zone( zone_name, comment="Created by vpc_dns 
script - {}".format(ts)) if parent_zone: - print("Updating parent zone {}".format(parent_zone_name)) + print(("Updating parent zone {}".format(parent_zone_name))) dns_records = set() dns_records.add(DNSRecord(parent_zone, zone_name, 'NS', 900, zone.NameServers)) @@ -188,7 +190,7 @@ def get_dns_from_instances(elb): instance = ec2_con.get_all_instances( instance_ids=[inst.id])[0].instances[0] except IndexError: - print("instance {} attached to elb {}".format(inst, elb)) + print(("instance {} attached to elb {}".format(inst, elb))) sys.exit(1) try: env_tag = instance.tags['environment'] @@ -200,8 +202,8 @@ def get_dns_from_instances(elb): play_tag = instance.tags['role'] break # only need the first instance for tag info except KeyError: - print("Instance {}, attached to elb {} does not " - "have a tag for environment, play or deployment".format(inst, elb)) + print(("Instance {}, attached to elb {} does not " + "have a tag for environment, play or deployment".format(inst, elb))) sys.exit(1) return env_tag, deployment_tag, play_tag @@ -244,7 +246,7 @@ def update_elb_rds_dns(zone): # the ELB_BAN_LIST if any(name in elb.name for name in ELB_BAN_LIST): - print("Skipping {} because it is on the ELB ban list".format(elb.name)) + print(("Skipping {} because it is on the ELB ban list".format(elb.name))) continue dns_records.add(DNSRecord(zone, fqdn, 'CNAME', 600, [elb.dns_name])) diff --git a/util/vpc-tools/vpcutil.py b/util/vpc-tools/vpcutil.py index 1834e8e901f..05e1110116c 100644 --- a/util/vpc-tools/vpcutil.py +++ b/util/vpc-tools/vpcutil.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import import boto import boto.rds2 import boto.rds @@ -35,4 +36,4 @@ def rds_subnet_group_name_for_stack_name(stack_name, region='us-east-1', aws_id= def all_stack_names(region='us-east-1', aws_id=None, aws_secret=None): vpc_conn = boto.connect_vpc(aws_id, aws_secret) return [vpc.tags[CFN_TAG_KEY] for vpc in vpc_conn.get_all_vpcs() - if CFN_TAG_KEY in vpc.tags.keys()] + if CFN_TAG_KEY 
in list(vpc.tags.keys())] diff --git a/vagrant/base/analyticstack/Vagrantfile b/vagrant/base/analyticstack/Vagrantfile index 7279293ff89..6c3fc18b2d3 100644 --- a/vagrant/base/analyticstack/Vagrantfile +++ b/vagrant/base/analyticstack/Vagrantfile @@ -22,7 +22,7 @@ VERSION_VARS = [ 'configuration_version', 'certs_version', 'forum_version', - 'xqueue_version', + 'XQUEUE_VERSION', 'demo_version', 'NOTIFIER_VERSION', 'ECOMMERCE_VERSION', diff --git a/vagrant/release/analyticstack/Vagrantfile b/vagrant/release/analyticstack/Vagrantfile index f7bbd1fd6fc..f854e226cb7 100644 --- a/vagrant/release/analyticstack/Vagrantfile +++ b/vagrant/release/analyticstack/Vagrantfile @@ -17,7 +17,7 @@ VERSION_VARS = [ 'configuration_version', 'certs_version', 'forum_version', - 'xqueue_version', + 'XQUEUE_VERSION', 'demo_version', 'NOTIFIER_VERSION', 'ECOMMERCE_VERSION',