diff --git a/cli/commands.js b/cli/commands.js
index cd7224a05f6c..d7d8ecdade6d 100644
--- a/cli/commands.js
+++ b/cli/commands.js
@@ -6,6 +6,9 @@ module.exports = {
     aws: {
         desc: 'Deploy the current application to Amazon Web Services'
     },
+    'aws-containers': {
+        desc: 'Deploy the current application to Amazon Web Services using ECS'
+    },
     'ci-cd': {
         desc: 'Create pipeline scripts for popular Continuous Integration/Continuous Deployment tools'
     },
diff --git a/generators/aws-containers/README.md b/generators/aws-containers/README.md
new file mode 100644
index 000000000000..ee0e70590091
--- /dev/null
+++ b/generators/aws-containers/README.md
@@ -0,0 +1,24 @@
+# AWS JHipster Generator - _A new hope_
+
+## AWS
+### ECR
+#### Authentication Token
+The token received by ECR to authenticate is structured as follows. We'll only take the first object.
+```json
+    {
+        "authorizationToken":"AUTHORIZATION_TOKEN",
+        "expiresAt":"2017-12-12T10:52:24.810Z",
+        "proxyEndpoint":"https://foo.ecr.region.amazonaws.com"
+    }
+```
+
+## Development
+### Dependencies
+
+```
+# NPM
+npm i --save-dev aws-sdk@^2.167.0 progress@2.0.0 ora@1.3.0
+
+# Yarn
+yarn add --dev aws-sdk@^2.167.0 progress@2.0.0 ora@1.3.0
+```
diff --git a/generators/aws-containers/USAGE b/generators/aws-containers/USAGE
new file mode 100644
index 000000000000..ea76c8fe5420
--- /dev/null
+++ b/generators/aws-containers/USAGE
@@ -0,0 +1,5 @@
+Description:
+    Initializes an AWS app and generates a Docker container that is ready to push to AWS.
+ +Example: + jhipster aws-containers diff --git a/generators/aws-containers/aws-client.js b/generators/aws-containers/aws-client.js new file mode 100644 index 000000000000..c22ba37f05db --- /dev/null +++ b/generators/aws-containers/aws-client.js @@ -0,0 +1,299 @@ +/* global process */ +const _ = require('lodash'); +const fs = require('fs'); +const chalk = require('chalk'); +const shelljs = require('shelljs'); + +const utils = require('../utils'); + +const AwsSSM = require('./lib/ssm'); +const AwsECR = require('./lib/ecr'); +const AwsCF = require('./lib/cf'); + +const DEFAULT_REGION = 'us-east-1'; +const S3_MIN_PART_SIZE = 5242880; + + +// Instance from aws-sdk +let AWS; +let credentials; +let ec2; +// let ecr; +let s3; +let sts; +let ora; + +// Instances from ./lib. Composed with aws-sdk +let SSM; +let ECR; +let CF; + +let ProgressBar; + +module.exports = { + DEFAULT_REGION, + SSM: () => SSM, + ECR: () => ECR, + CF: () => CF, + createS3Bucket, + getDockerLogin, + listRegions, + listSubnets, + listVpcs, + loadAWS, + saveCredentialsInAWS, + initAwsStuff, + sanitizeBucketName, + uploadTemplate, +}; + +/** + * Will load the aws-sdk npm dependency if it's not already loaded. + * + * @param generator the yeoman generator it'll be loaded in. + * @returns {Promise} The promise will succeed if the aws-sdk has been loaded and fails if it couldn't be installed. 
+ */ +function loadAWS(generator) { + return new Promise((resolve, reject) => { + try { + AWS = require('aws-sdk') // eslint-disable-line + ProgressBar = require('progress'); // eslint-disable-line + ora = require('ora'); // eslint-disable-line + } catch (e) { + generator.log('Installing AWS dependencies'); + let installCommand = 'yarn add aws-sdk@^2.167.0 progress@2.0.0 ora@1.3.0'; + if (generator.config.get('clientPackageManager') === 'npm') { + installCommand = 'npm install aws-sdk@^2.167.0 progress@2.0.0 ora@1.3.0--save'; + } + shelljs.exec(installCommand, { silent: false }, (code) => { + if (code !== 0) { + generator.error('Something went wrong while installing the dependencies\n'); + reject(); + } + AWS = require('aws-sdk') // eslint-disable-line + ProgressBar = require('progress'); // eslint-disable-line + ora = require('ora'); // eslint-disable-line + }); + } + resolve(); + }); +} + +/** + * Init AWS stuff like ECR and whatnot. + * + * @param ecrConfig The config used to instanciate ECR + */ +function initAwsStuff(region = DEFAULT_REGION) { + ec2 = new AWS.EC2({ region }); + // ecr = new AWS.ECR({ region }); + s3 = new AWS.S3(); + sts = new AWS.STS(); + + SSM = new AwsSSM(region); + ECR = new AwsECR(region); + CF = new AwsCF(region); +} + +/** + * Wraps the promise in a CLI spinner + * @param promise + */ +function spinner(promise, text = 'loading', spinnerIcon = 'monkey') { + const spinner = ora({ spinner: spinnerIcon, text }).start(); + return new Promise((resolve, reject) => { + promise.then((resolved) => { + spinner.stop(); + resolve(resolved); + }).catch((err) => { + spinner.stop(); + reject(err); + }); + }); +} + + +/** + * listRegions() returns a Promise, which resolves to an array of AWS region objects, + * with "Endpoint" and "RegionName" properties + * + * @param region to use. 
Defaults to us-east-1 + * @returns {Promise} + */ +function listRegions() { + return spinner(ec2.describeRegions({}) + .promise() + .then(data => data.Regions)); +} + +/** + * listVpcs(region) returns a Promise, which resolves to an array of AWS VPC objects + * @param region to look for VPCs + * @returns {Promise} + */ +function listVpcs() { + return spinner(ec2.describeVpcs({}) + .promise() + .then(data => data.Vpcs)); +} + +/** + * listSubnets(vpcId) returns a Promise, which resolves to an array of + * Subnets available within the supplied VPC + * @param region to look for subnets + * @param vpcId of the VPC with the subnets + * @returns {Promise} + */ +function listSubnets(vpcId) { + const params = { + Filters: [ + { + Name: 'vpc-id', + Values: [vpcId] + }, { + Name: 'state', + Values: ['available'] + } + ] + }; + return spinner(ec2.describeSubnets(params) + .promise().then(data => data.Subnets)); +} + +/** + * Get the credentials from the ~/.aws/credentials file using the AWS_PROFILE env var to get the profile. + * + * @param profile The AWS profile to get the credentials from. Default to 'default' + * @returns {Promise} Will resolve with no parameters if it succeeds, rejects with the error if it fails (no credentials found for given profile. + */ +function saveCredentialsInAWS(profile = 'default') { + credentials = new AWS.SharedIniFileCredentials({ profile }); + return new Promise((resolve, reject) => credentials.refresh((err) => { + if (err) { + reject(err); + } + AWS.config.credentials = credentials; + resolve(); + })); +} + +/** + * Retrieve decoded information to authenticate to Docker with AWS credentials. + * @returns {Promise} Returns a promise that resolves when the informations are retrieved. 
+ */ +function getDockerLogin() { + return spinner(new Promise((resolve, reject) => _getAuthorizationToken() + .then(authToken => sts.getCallerIdentity({}).promise() + .then((data) => { + const decoded = utils.decodeBase64(authToken.authorizationToken); + const splitResult = _.split(decoded, ':'); + resolve({ + username: splitResult[0], + password: splitResult[1], + accountId: data.Account + }); + }) + .catch(() => reject(new Error('Couldn\'t retrieve the user informations')))))); +} + +/** + * Fetch Authentication token from AWS to authenticate with Docker + * @returns {Promise} Returns a promise that resolves when the informations are retrieved. + * @private + */ +function _getAuthorizationToken() { + return spinner(new Promise((resolve, reject) => + ECR.sdk.getAuthorizationToken({}).promise() + .then((data) => { + if (!_.has(data, 'authorizationData.0')) { + reject(new Error('No authorization data found.')); + return; + } + resolve(data.authorizationData[0]); + }))); +} + +/** + * Create a S3 Bucket in said region with said bucketBaseName. + * the bucketBaseName will be used to create a + * @param bucketName the name of the bucket to create. + * @param region the region to create the bucket in. + * @returns {Promise} + */ +function createS3Bucket(bucketName, region = DEFAULT_REGION) { + const createBuckerParams = { + Bucket: bucketName + }; + return spinner(new Promise((resolve, reject) => s3.headBucket({ + Bucket: bucketName + }).promise() + .catch((error) => { + if (error.code !== 'NotFound') { + reject(new Error(`The S3 Bucket ${chalk.bold(bucketName)} in region ${chalk.bold(region)} already exists and you don't have access to it. 
Error code: ${chalk.bold(error.code)}`)); + } + }) + .then(() => + s3.createBucket(createBuckerParams).promise() + .then(resolve) + .catch(error => reject(new Error(`There was an error during the creation of the S3 Bucket ${chalk.bold(bucketName)} in region ${chalk.bold(region)}`)))))); +} + +/** + * Upload the template in the S3Bucket + * @param bucketName S3 Bucket name to upload the template into + * @param filename Name to give to the file in the Bucket + * @param path Path to the file + * @returns {Promise} + */ +function uploadTemplate(bucketName, filename, path) { + return spinner(new Promise((resolve, reject) => + fs.stat(path, (error, stats) => { + if (_.isNil(stats)) { + reject(new Error(`File ${chalk.bold(path)} not found`)); + } + const upload = s3.upload( + { + Bucket: bucketName, + Key: filename, + Body: fs.createReadStream(path) + }, + { + partSize: Math.max(stats.size, S3_MIN_PART_SIZE), + queueSize: 1 + } + ); + let bar; + upload.on('httpUploadProgress', (evt) => { + if (bar === undefined && evt.total) { + const total = evt.total / 1000000; + bar = new ProgressBar('uploading [:bar] :percent :etas', { + complete: '=', + incomplete: ' ', + width: 20, + total, + clear: true + }); + } + + const curr = evt.loaded / 1000000; + bar.tick(curr - bar.curr); + }); + return upload.promise() + .then(resolve) + .catch(reject); + }))); +} + +/** + * Sanitize the bucketName following the rule found here: + * http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html + * @param bucketName + * @returns {string} + */ +function sanitizeBucketName(bucketName) { + let labels = _.split(bucketName, '.'); + labels = _.filter(labels, e => e); + labels = _.map(labels, _.toLower); + labels = _.map(labels, e => _.replace(e, '_', '-')); + return _.join(labels, '.'); +} diff --git a/generators/aws-containers/index.js b/generators/aws-containers/index.js new file mode 100644 index 000000000000..a3773c12952d --- /dev/null +++ 
b/generators/aws-containers/index.js @@ -0,0 +1,598 @@ +const _ = require('lodash'); +const chalk = require('chalk'); + +const BaseGenerator = require('../generator-base'); +const docker = require('../docker-base'); +const dockerCli = require('../docker-cli'); +const dockerUtils = require('../docker-utils'); +const dockerPrompts = require('../docker-prompts'); +const constants = require('../generator-constants'); + +const prompts = require('./prompts'); +const awsClient = require('./aws-client'); + +const AWS_SSM_ARTIFACT = 'aws-java-sdk-ssm'; +const AWS_SSM_GROUP = 'com.amazonaws'; +const SPRING_CLOUD_GROUP = 'org.springframework.cloud'; +const SPRING_CLOUD_ARTIFACT = 'spring-cloud-context'; +const AWS_SSM_VERSION = '1.11.247'; +const SPRING_CLOUD_CTX_VERSION = '1.3.0.RELEASE'; + +const BASE_TEMPLATE_PATH = 'base.template.yml'; +const APP_TEMPLATE_PATH = baseName => `${baseName}.template.yml`; +const AWSSSM_CONFIG_PATH = (directory, packageFolder) => `${directory}/${constants.SERVER_MAIN_SRC_DIR}/${packageFolder}/bootstrap/AwsSSMConfiguration.java`; +const SPRING_FACTORIES_PATH = directory => `${directory}/${constants.SERVER_MAIN_RES_DIR}/META-INF/spring.factories`; +const BOOTSTRAP_PATH = directory => `${directory}/${constants.SERVER_MAIN_RES_DIR}/config/bootstrap-aws.yml`; + +const BASE_TEMPLATE_FILENAME = '_base.template.yml'; +const APP_TEMPLATE_FILENAME = '_application.template.yml'; + +const AWSSSM_CONFIG_FILENAME = '_AwsSSMConfiguration.java'; +const SPRING_FACTORIES_FILENAME = '_spring.factories'; +const BOOTSTRAP_FILENAME = '_bootstrap-aws.yml'; + +/** + * Returns the password property to be stored within Amazon SSM + * @param stacKName Name of the root stack + * @param applicationName Name of the application property + * @returns {string} + * Full property path + */ +const ssmPasswordProperty = (stacKName, applicationName) => `/${stacKName}/${applicationName}/spring.datasource.password`; + + +module.exports = class extends BaseGenerator { + 
    constructor(args, opts) {
        super(args, opts);

        this.configOptions = this.options.configOptions || {};

        // This adds support for a `--skip-build` flag
        this.option('skip-build', {
            desc: 'Disables the project build step',
            type: Boolean,
            defaults: false
        });

        // This adds support for a `--skip-upload` flag
        this.option('skip-upload', {
            desc: 'Skips the Docker Image Tag + Upload process',
            type: Boolean,
            defaults: false
        });
    }

    // Yeoman "initializing" priority: greet, read CLI flags, load saved config,
    // check Docker, lazily load the AWS SDK and validate the AWS credentials.
    get initializing() {
        return {
            bonjour() {
                this.log(chalk.bold('This AWS generator will help you deploy your JHipster app as a Docker container on AWS.'));
            },
            option() {
                // NOTE(review): deployNow is read from the 'skip-install' flag,
                // which looks like the wrong option name — confirm intent.
                this.deployNow = this.options['skip-install'];
                this.skipUpload = this.options['skip-upload'];
                this.skipBuild = this.options['skip-build'];
            },
            getConfig() {
                // this.baseName = this.config.get(constants.conf.baseName);
                // this.dbType = this.config.get(constants.conf.databaseType);
                // this.buildTool = this.config.get(constants.conf.buildTool);
                //
                // this.packageName = this.config.get('packageName');
                // this.packageFolder = this.config.get('packageFolder');
                // this.prodDatabaseType = this.config.get('prodDatabaseType');
                // this.hasAWSConfig = !!this.config.get('aws');

                // Merge the previously saved 'aws' config (if any) over the
                // empty defaults so every expected key exists.
                this.aws = Object.assign(
                    {},
                    {
                        apps: [],
                        vpc: {},
                        dockerLogin: {
                            accountId: null,
                            password: null
                        }
                    },
                    this.config.get('aws')
                );

                this.defaultAppsFolders = this.aws.apps.map(a => a.baseName);
            },
            checkDocker: docker.checkDocker,
            loadAWS() {
                if (this.abort) return;
                const done = this.async();
                // Loads (and installs if missing) the aws-sdk/progress/ora deps.
                awsClient.loadAWS(this)
                    .then(() => done())
                    .catch(() => done('Error while loading the AWS library'));
            },
            checkAwsCredentials() {
                if (this.abort) return;
                const done = this.async();
                // Profile comes from AWS_PROFILE, falling back to 'default'.
                let profile = process.env.AWS_PROFILE;
                if (!profile) {
                    profile = 'default';
                }
                awsClient.saveCredentialsInAWS(profile)
                    .then(() => {
                        this.log.ok(`AWS credentials using profile ${chalk.bold(profile)}.`);
                        done();
                    })
.catch(() => { + this.log.error(chalk.red(`No AWS credentials found for profile ${chalk.bold(profile)}`)); + this.abort = true; + done(); + }); + }, + initAwsStuff() { + awsClient.initAwsStuff(); + }, + setOutputs() { + dockerCli.setOutputs( + data => this.log(chalk.white(data.toString().trim())), + data => this.log.error(data.toString().trim()) + ); + + awsClient.CF().setOutputs( + message => this.log(message), + message => this.log.error(message) + ); + }, + fetchRegion() { + if (this.abort) return; + const done = this.async(); + this.awsFacts = { + apps: [], + defaultRegion: awsClient.DEFAULT_REGION, + database_Password_InSSM: false + }; + awsClient.listRegions() + .then((regions) => { + const regionsLabel = _.map(regions, r => r.RegionName); + prompts.setRegionList(regionsLabel); + done(); + }) + .catch((err) => { + this.log.error(err); + this.abort = true; + done(); + }); + }, + }; + } + + get prompting() { + return { + bonjour() { + if (this.abort) return; + this.log(chalk.bold('❓ AWS prompting')); + }, + askTypeOfApplication: prompts.askTypeOfApplication, + askDirectoryPath: dockerPrompts.askForPath, + askForApps: dockerPrompts.askForApps, + getAppConfig: dockerPrompts.loadConfigs, + askRegion: prompts.askRegion, + initAwsAndLoadVPCs() { + awsClient.initAwsStuff(this.aws.region); + return awsClient.listVpcs().then((listOfVpcs) => { + this.awsFacts.availableVpcs = listOfVpcs; + }); + }, + askVPC: prompts.askVPC, + initSubnets() { + const done = this.async; + return awsClient.listSubnets(this.aws.vpc.id) + .then((subnets) => { + this.awsFacts.availableSubnets = subnets; + done(); + }) + .catch(() => { + this.log.error('Unable to fetch the subnets'); + done(); + }); + }, + askForSubnets: prompts.askForSubnets, + askCloudFormation: prompts.askCloudFormation, + askPerformances: prompts.askPerformances, + retrievePassword() { + const done = this.async; + // Attempts to retrieve a previously set database password from SSM. 
+ const promises = this.aws.apps.map(app => awsClient.SSM().getSSMParameter(ssmPasswordProperty(this.aws.cloudFormationName, app.baseName)) + .then((password) => { + if (password) { + let fact = this.awsFacts.apps.find(a => a.baseName === app.baseName); + if (_.isUndefined(fact)) { + fact = { + baseName: app.baseName + }; + this.awsFacts.apps.push(fact); + } + + fact.database_Password = password; + fact.database_Password_InSSM = true; + } + }) + .catch(() => { + this.log.error('Unable to fetch the SSM Parameters'); + })); + + return Promise.all(promises) + .then(() => done()) + .catch(() => { + this.abort = true; + done(); + }); + }, + askForDBPassword: prompts.askForDBPasswords, + askDeployNow: prompts.askDeployNow + }; + } + + get configuring() { + return { + bonjour() { + if (this.abort) return; + this.log(chalk.bold('🔧🛠️ AWS configuring')); + }, + purgeAwsApps() { + this.aws.apps = this.aws.apps.filter(app => this.appConfigs.find(conf => conf.baseName === app.baseName)); + }, + getDockerLogin() { + if (this.abort) return null; + const done = this.async; + return awsClient.getDockerLogin() + .then((token) => { + this.log.ok('ECR Auth token has been retrieved.'); + this.aws.dockerLogin = token; + done(); + }) + .catch((error) => { + this.log.error(error); + this.abort = true; + done(); + }); + }, + setBucketName() { + this.aws.s3BucketName = this.aws.s3BucketName || awsClient.sanitizeBucketName(`${this.aws.cloudFormationName}_${new Date().getTime()}`); + } + }; + } + + get default() { + return { + bonjour() { + if (this.abort) return; + this.log(chalk.bold('AWS default')); + }, + addAWSSpringDependencies() { + this.appConfigs.forEach((config) => { + const directory = `${this.directoryPath}${config.appFolder}`; + if (config.buildTool === 'maven') { + this.addMavenDependencyInDirectory(directory, AWS_SSM_GROUP, AWS_SSM_ARTIFACT, AWS_SSM_VERSION); + this.addMavenDependencyInDirectory(directory, SPRING_CLOUD_GROUP, SPRING_CLOUD_ARTIFACT, SPRING_CLOUD_CTX_VERSION); + 
} else if (config.buildTool === 'gradle') { + this.addGradleDependencyInDirectory(directory, 'compile', AWS_SSM_GROUP, AWS_SSM_ARTIFACT, AWS_SSM_VERSION); + this.addGradleDependencyInDirectory(directory, 'compile', SPRING_CLOUD_GROUP, SPRING_CLOUD_ARTIFACT, SPRING_CLOUD_CTX_VERSION); + } + }); + }, + setAuroraParameters() { + if (this.abort) return; + this.appConfigs.forEach((appConfig) => { + const app = this.aws.apps.find(a => a.baseName === appConfig.baseName); + app.dbType = appConfig.prodDatabaseType; + app.auroraEngine = appConfig.dbType === 'postgresql' ? 'aurora-postgresql' : 'aurora'; + app.auroraFamily = appConfig.dbType === 'postgresql' ? 'aurora-postgresql9.6' : 'aurora5.6'; + app.auroraClusterParam = appConfig.dbType === 'postgresql' ? 'client_encoding: UTF8' : 'character_set_database: utf8'; + app.auroraDbParam = appConfig.dbType === 'postgresql' ? 'check_function_bodies: 0' : 'sql_mode: IGNORE_SPACE'; + }); + }, + springProjectChanges() { + if (this.abort) return; + const done = this.async(); + + this.appConfigs.forEach((config) => { + const directory = `${this.directoryPath}${config.appFolder}`; + this.temp = { + baseName: config.baseName, + packageName: config.packageName + }; + this.template(AWSSSM_CONFIG_FILENAME, AWSSSM_CONFIG_PATH(directory, config.packageFolder)); + this.template(SPRING_FACTORIES_FILENAME, SPRING_FACTORIES_PATH(directory)); + this.template(BOOTSTRAP_FILENAME, BOOTSTRAP_PATH(directory)); + }); + + this.conflicter.resolve(() => { + delete this.temp; + done(); + }); + }, + generateCloudFormationTemplate() { + if (this.abort) return; + const done = this.async(); + + this.template(BASE_TEMPLATE_FILENAME, BASE_TEMPLATE_PATH); + this.aws.apps.forEach(config => this.template( + APP_TEMPLATE_FILENAME, + APP_TEMPLATE_PATH(config.baseName), + null, + {}, + { aws: this.aws, app: config } + )); + this.conflicter.resolve(() => { + done(); + }); + } + }; + } + + _uploadTemplateToAWS(filename, path) { + if (this.abort) return null; + const 
done = this.async; + + return awsClient.uploadTemplate(this.aws.s3BucketName, filename, path) + .then((result) => { + this.log.ok(`${chalk.bold(filename)} has been updated to the S3 Bucket and can be found here: ${chalk.underline(result.Location)}`); + done(); + return result; + }) + .catch((error) => { + this.log.error(error.message); + this.abort = true; + done(); + }); + } + + get end() { + return { + checkAndBuildImages() { + if (this.abort || !this.deployNow || this.skipBuild) return null; + const done = this.async(); + const promises = this.appConfigs.map(config => dockerUtils.checkAndBuildImages.call( + this, + { + cwd: `${this.directoryPath}${config.appFolder}/`, + appConfig: { buildTool: config.buildTool } + } + )); + + return Promise.all(promises) + .then(() => done()) + .catch(() => { + this.abort = true; + done(); + }); + }, + createS3Bucket() { + if (this.abort || !this.deployNow) return null; + const done = this.async; + return awsClient.createS3Bucket(this.aws.s3BucketName, this.aws.region) + .then((result) => { + this.aws.s3bucketLocation = result.Location; + this.log.ok(`The S3 Bucket ${chalk.bold(this.aws.s3BucketName)} has been created.`); + done(); + }) + .catch((error) => { + this.log.error(`Could not create the S3 bucket : ${error.message}`); + this.abort = true; + done(); + }); + }, + uploadBaseTemplate() { + if (this.abort || !this.deployNow) return null; + return this._uploadTemplateToAWS('base.template.yml', BASE_TEMPLATE_PATH) + .then((result) => { + this.aws.s3BaseTemplate = result; + }); + }, + uploadAppTemplate() { + if (this.abort || !this.deployNow) return null; + const done = this.async; + const promises = this.aws.apps.map(config => this._uploadTemplateToAWS(APP_TEMPLATE_PATH(config.baseName), APP_TEMPLATE_PATH(config.baseName))); + + return Promise.all(promises) + .then(() => done()) + .catch((e) => { + this.abort = true; + done(); + }); + }, + createOrUpdateStack() { + if (this.abort || !this.deployNow) return null; + const done 
= this.async; + + return awsClient.CF().getStack(this.aws.cloudFormationName) + .then(() => { + const promises = this.aws.apps.map(app => awsClient.CF().getStack(app.stackId) + .then(() => { + this.log.ok(`Existing CloudFormation Stack for app ${chalk.bold(app.baseName)} Found ☁️`); + }) + .catch(() => { + this.log.error(`Issue retrieving nested stack for app ${chalk.bold(app.baseName)} doesn't exist`); + })); + + return Promise.all(promises) + .then(() => done()) + .catch(() => { + this.abort = true; + done(); + }); + }) + .catch(() => { + this.log.ok('Initialising CloudFormation Stack ☁️ (this can take up to 15 minutes depending on load)'); + const databasePasswords = this.awsFacts.apps.map(a => awsClient.CF().cfParameter(`${a.baseName}DBPassword`, a.database_Password)); + return awsClient.CF().createCloudFormationStack(this.aws.cloudFormationName, this.aws.s3BaseTemplate.Location, databasePasswords) + .then((result) => { + this.log.ok(`The CloudFormation Stack ${chalk.bold(result.StackId)} has been created.`); + this.aws.apps.forEach((app) => { + const stack = result.nestedStacks.find(stack => stack.appName.includes(app.baseName)); + app.stackId = stack.stackId; + }); + done(); + }) + .catch((error) => { + this.log.error(`There was an error creating the stack: ${error.message}`); + this.abort = true; + done(); + }); + }); + }, + getElasticContainerRepositoryName() { + if (this.abort || !this.deployNow) return null; + const done = this.async; + + const promises = this.aws.apps.map(app => awsClient.CF().getEcrId(app.stackId) + .then((result) => { + app.EcrRepositoryName = result; + this.log.ok(`ECR Repository ID for app ${chalk.bold(app.baseName)} was found: ${result}`); + }) + .catch((error) => { + this.log.error(`Couldn't get ECR Repository Id for app ${chalk.bold(app.baseName)}: ${error.message}`); + this.abort = true; + })); + + return Promise.all(promises) + .then(() => done()); + }, + setSSMDatabasePassword() { + if (this.abort || !this.deployNow) return 
null; + const done = this.async; + + const promises = this.aws.apps.map((app) => { + const fact = this.awsFacts.apps.find(fact => fact.baseName === app.baseName); + + if (fact.database_Password_InSSM) { + return null; + } + const passwordProperty = ssmPasswordProperty(this.aws.cloudFormationName, app.baseName); + return awsClient.SSM().setSSMParameter( + passwordProperty, + fact.database_Password, + `Database master password for ${app.baseName}` + ).then(() => { + this.log.ok(`Password has been set in ASM Parameter: ${passwordProperty}`); + }).catch((err) => { + this.log.error(`Issue setting SSM property. Error: ${err.message}`); + }); + }); + + return Promise.all(promises) + .then(() => done()) + .catch(() => { + this.abort = true; + done(); + }); + }, + getEcrRepositoryURI() { + if (this.abort || !this.deployNow) return null; + const done = this.async; + + const promises = this.aws.apps.map(app => awsClient.ECR().getEcrRepositoryURI(app.EcrRepositoryName) + .then((uri) => { + app.EcrRepositoryUri = uri; + this.log.ok(`ECR Repository URI for ${app.baseName} was found: ${uri}`); + }) + .catch((error) => { + this.log.error(`Couldn't get ECR URI for ${app.baseName} : ${error.message}`); + this.abort = true; + })); + + return Promise.all(promises) + .then(() => done()) + .catch((e) => { + this.abort = true; + done(); + }); + }, + tagDockerImage() { + if (this.abort || !this.deployNow || this.skipUpload) return null; + const done = this.async; + + const promises = this.aws.apps.map((app) => { + const from = `${app.baseName}:latest`; + const to = `${app.EcrRepositoryUri}:latest`; + return dockerCli.tagImage(from, to) + .then(() => { + app.dockerImageTag = to; + this.log.ok(`The Docker image was tagged: ${chalk.bold(to)}`); + }) + .catch((error) => { + this.log.error(error); + this.log.error(`There was an error tagging the Docker image: ${error.message}`); + }); + }); + + return Promise.all(promises) + .then(() => done()) + .catch((e) => { + this.abort = true; + done(); 
+ }); + }, + loginToAws() { + if (this.abort || !this.deployNow || this.skipUpload) return null; + const done = this.async; + return dockerCli.loginToAws(this.aws.region, this.aws.dockerLogin.accountId, this.aws.dockerLogin.username, this.aws.dockerLogin.password) + .then(() => { + this.log.ok(`Docker is now connected to your account in the region ${this.aws.region}.`); + done(); + }) + .catch(() => { + this.log.error('Couldn\'t connect to AWS with Docker'); + this.abort = true; + done(); + }); + }, + pushDockerImage() { + if (this.abort || !this.deployNow || this.skipUpload) return null; + const done = this.async; + + const promises = this.aws.apps.map((app) => { + const repository = `${app.EcrRepositoryUri}:latest`; + return dockerCli.pushImage(repository) + .then((ok) => { + this.log.ok(`Image is now pushed to repository ${repository}.`); + }) + .catch((err) => { + this.log.error('Couldn\'t push image to AWS ECR Repository'); + }); + }); + + return Promise.all(promises) + .then(() => done()) + .catch(() => { + this.abort = true; + done(); + }); + }, + updateStack() { + if (this.abort || !this.deployNow) return null; + const done = this.async; + const databasePasswords = this.awsFacts.apps.map(a => awsClient.CF().cfParameter(`${a.baseName}DBPassword`, a.database_Password)); + const nestedStackIds = this.aws.apps.map(app => app.stackId); + this.log.ok('Updating Existing CloudFormation Stack ☁️'); + return awsClient.CF().updateCloudFormationStack( + this.aws.cloudFormationName, nestedStackIds, + this.aws.s3BaseTemplate.Location, databasePasswords, 'true' + ).then((result) => { + this.log(`The CloudFormation Stack ${chalk.bold(this.aws.cloudFormationName)} has been updated`); + if (_.has(result, 'Stacks[0].Outputs')) { + this.log.ok('Applications Accessible at Load Balancers:'); + _(result).get('Stacks[0].Outputs') + .filter(output => output.OutputKey.startsWith('LoadBalancerOutput')) + .forEach(output => this.log(`\thttp://${output.OutputValue}`)); + } + + done(); 
+ }).catch((error) => { + this.log.error(`There was an error updating the stack: ${error.message}`); + this.abort = true; + done(); + }); + }, + saveConf() { + delete this.aws.dockerLogin; + this.config.set(constants.conf.aws, this.aws); + } + }; + } +}; diff --git a/generators/aws-containers/lib/cf.js b/generators/aws-containers/lib/cf.js new file mode 100644 index 000000000000..6ce7fafa2dc9 --- /dev/null +++ b/generators/aws-containers/lib/cf.js @@ -0,0 +1,301 @@ +const AWS = require('aws-sdk'); // eslint-disable-line +const _ = require('lodash'); +const chalk = require('chalk'); + +const spinner = require('../../utils').spinner; + +const DEFAULT_REGION = 'us-east-1'; + +const STACK_LISTENER_INTERVAL = 15000; +const PHYSICAL_RESOURCE_SEPARATOR = ':'; +const STACK_EVENT_STATUS_DISPLAY_LENGTH = 35; + +let stdOut = message => console.error(message.trim()); +let stdErr = message => console.error(message.trim()); // eslint-disable-line +module.exports = class CF { + constructor(region) { + this.cloudFormation = new AWS.CloudFormation({ region }); + } + + setOutputs(stdout, stderr) { + stdOut = stdout; + stdErr = stderr; + } + + /** + * Create an object which can be supplied to cloudformation for initialisation + * @param key + * @param value + * @returns {{ParameterKey: *, ParameterValue: *}} + */ + cfParameter(key, value) { + return { ParameterKey: key, ParameterValue: value }; + } + + /** + * Create a CloudFormation Stack in AWS + * @param stackName the stack to give to the name. It MUST to be unique in the WHOLE region the Stack is created in + * @param templateUrl url to the desired template. 
+ * @param additionalParams additional parrameter array to add to CF script + * @returns {Promise.} + */ + createCloudFormationStack(stackName, templateUrl, additionalParams = []) { + return this.cloudFormation.createStack({ + StackName: stackName, + Capabilities: ['CAPABILITY_IAM'], + OnFailure: 'DELETE', + Parameters: [this.cfParameter('shouldDeployService', 'false')].concat(additionalParams), + TemplateURL: templateUrl, + }).promise() + .then(() => this._stackCreationEventListener(stackName)); + } + + /** + * Fetch the name of the ECR repository out of the CloudFormation Stack. + * @param stackId + * @returns {Promise.} + */ + getEcrId(stackId) { + return this.cloudFormation.describeStackResource({ + StackName: stackId, + LogicalResourceId: 'JHipsterContainerRegistry' + }).promise() + .then(data => data.StackResourceDetail.PhysicalResourceId); + } + + /** + * Check that the AWS CloudFormation stack exists. + * @param stackName the name of the stack to look for. + * @returns {Promise>} + */ + getStack(stackName) { + return this.cloudFormation.describeStacks({ StackName: stackName }).promise(); + } + + /** + * Create a Listener for the Parent Stack we create. It will listen to all of the events and display them. + * @param stackName the name of the stack to listen to. 
+ * @returns {Promise} + * @private + */ + _stackCreationEventListener(stackName) { + return new Promise((resolve, reject) => { + const params = { StackName: stackName }; + const parentEvents = {}; + let listenerInterval; + let previousEventId; + const nestedStacks = {}; + + const cancel = (error) => { + clearInterval(listenerInterval); + _.forEach(nestedStacks, o => clearInterval(o.listenerInterval)); + reject(error); + }; + const complete = (result) => { + clearInterval(listenerInterval); + _.forEach(nestedStacks, o => clearInterval(o.listenerInterval)); + resolve(Object.assign( + {}, + result.Stacks[0], + { + nestedStacks: _.map(nestedStacks, (stack, key) => ({ + appName: stack.appName, + stackId: key + })) + } + )); + }; + + listenerInterval = setInterval( + () => this.cloudFormation.describeStackEvents(params).promise() + .then((result) => { + const unproceedEvents = _.chain(result.StackEvents).filter(event => !_.has(parentEvents, event.EventId)).reverse().value(); + unproceedEvents.forEach((stack) => { + parentEvents[stack.EventId] = stack; + if (stack.EventId !== previousEventId) { + stdOut(_getStackLogLine(stack)); + previousEventId = stack.EventId; + } + if (_isStackEventError(stack)) { + cancel(new Error('Creation of stack failed.')); + } + + if (_.has(nestedStacks, stack.PhysicalResourceId) + || !_doesEventContainsNestedStackId(stack)) { + return; + } + + const nestedStackId = stack.PhysicalResourceId; + nestedStacks[nestedStackId] = { + appName: stack.LogicalResourceId, + listenerInterval: null, + events: {}, + previousEventId: null + }; + nestedStacks[nestedStackId].listenerInterval = setInterval( + () => + this.cloudFormation.describeStackEvents({ StackName: nestedStackId }).promise() + .then((result) => { + const stackMeta = nestedStacks[nestedStackId]; + const unproceedEvents = _.chain(result.StackEvents).filter(event => !_.has(stackMeta.events, event.EventId)).reverse().value(); + + unproceedEvents.forEach((stack) => { + 
stackMeta.events[stack.EventId] = stack; + + if (stack.EventId !== stackMeta.previousEventId) { + stdOut(_getStackLogLine(stack, 1)); + stackMeta.previousEventId = stack.EventId; + } + if (_isStackEventError(stack)) { + cancel(new Error('Creation of nested stack failed')); + } + }); + }) + .catch(cancel), + STACK_LISTENER_INTERVAL + ); + }); + }) + .catch(cancel), + STACK_LISTENER_INTERVAL + ); + + return this.cloudFormation.waitFor('stackCreateComplete', params).promise() + .then(complete) + .catch(cancel); + }); + } + + /** + * TODO: Manage deletion/recreation of nested stack + * @param stackName + * @param nestedStackNames + */ + updateCloudFormationStack(stackName, nestedStackNames = [], templateUrl, additionalParams, deployService = 'false') { + /* + createCloudFormationStack(stackName, templateUrl, additionalParams = []) { + return this.cloudFormation.createStack({ + StackName: stackName, + Capabilities: ['CAPABILITY_IAM'], + OnFailure: 'DELETE', + Parameters: [this.cfParameter('shouldDeployService', 'false')].concat(additionalParams), + */ + const stackToListen = _.concat(nestedStackNames, stackName); + const listeners = { 0: { previousEventId: null, listenerId: null } }; + const clearIntervals = () => _.forEach(listeners, l => clearInterval(l.listenerId)); + + return new Promise((resolve, reject) => this.cloudFormation.updateStack({ + StackName: stackName, + Capabilities: ['CAPABILITY_IAM'], + Parameters: [this.cfParameter('shouldDeployService', deployService)].concat(additionalParams), + TemplateURL: templateUrl, + }).promise() + .catch(reject) + .then(() => { + const success = (result) => { clearIntervals(); resolve(result); }; + const failure = (error) => { clearIntervals(); reject(error); }; + _.forEach(stackToListen, (id) => { + listeners[id] = {}; + listeners[id].previousEventId = null; + listeners[id].listenerId = setInterval( + () => this.cloudFormation.describeStackEvents({ StackName: id }).promise() + .then((result) => { + const listener = 
listeners[id]; + const stack = result.StackEvents[0]; + if (stack.EventId !== listener.previousEventId) { + stdOut(_getStackLogLine(stack, 1)); + listener.previousEventId = stack.EventId; + listeners[id] = listener; + } + if (_isStackEventError(stack)) { + failure(new Error('Creation of nested stack failed')); + } + }) + .catch(failure) + , STACK_LISTENER_INTERVAL + ); + }); + + return this.cloudFormation.waitFor('stackUpdateComplete', { StackName: stackName }).promise() + .then(success) + .catch(failure); + })); + } +}; + +/** + * Check if the stack event contains the name a Nested Stack name. + * @param stack The StackEvent object. + * @returns {boolean} true if the object contain a Nested Stack name, false otherwise. + * @private + */ +function _doesEventContainsNestedStackId(stack) { + if (stack.ResourceType !== 'AWS::CloudFormation::Stack') { + return false; + } + if (stack.ResourceStatusReason !== 'Resource creation Initiated') { + return false; + } + if (stack.ResourceStatus !== 'CREATE_IN_PROGRESS') { + return false; + } + if (_.isNil(stack.PhysicalResourceId)) { + return false; + } + + return _hasLabelNestedStackName(stack.PhysicalResourceId); +} + +/** + * returns a formatted status string ready to be displayed + * @param status + * @returns {*} a string + * @private + */ +function _formatStatus(status) { + let statusColorFn = chalk.grey; + if (_.endsWith(status, 'IN_PROGRESS')) { + statusColorFn = chalk.yellow; + } else if (_.endsWith(status, 'FAILED') || _.startsWith(status, 'DELETE')) { + statusColorFn = chalk.red; + } else if (_.endsWith(status, 'COMPLETE')) { + statusColorFn = chalk.greenBright; + } + + const sanitizedStatus = _.replace(status, '_', ' '); + const paddedStatus = _.padEnd(sanitizedStatus, STACK_EVENT_STATUS_DISPLAY_LENGTH); + return statusColorFn(paddedStatus); +} + +/** + * Generate an enriched string to display a CloudFormation Stack creation event. 
+ * @param stack Stack event + * @param indentation level of indentation to display (between the date and the rest of the log) + * @returns {string} + * @private + */ +function _getStackLogLine(stack, indentation = 0) { + const time = chalk.blue(`${stack.Timestamp.toLocaleTimeString()}`); + const spacing = _.repeat('\t', indentation); + const status = _formatStatus(stack.ResourceStatus); + + const stackName = chalk.grey(stack.StackName); + const resourceType = chalk.bold(stack.ResourceType); + return `${time} ${spacing}${status} ${resourceType}\t${stackName}`; +} + + +/** + * Check if the PhysicalResourceId label contains a Nested Stack name. + * @param physicalResource the label to evaluate + * @returns {boolean|*} + * @private + */ +function _hasLabelNestedStackName(physicalResource) { + return _(physicalResource).split(PHYSICAL_RESOURCE_SEPARATOR).last().startsWith('stack/'); +} + + +function _isStackEventError(stack) { + return _.includes(['CREATE_FAILED', 'DELETE_IN_PROGRESS'], stack.ResourceStatus); +} diff --git a/generators/aws-containers/lib/ecr.js b/generators/aws-containers/lib/ecr.js new file mode 100644 index 000000000000..9f28237a93ce --- /dev/null +++ b/generators/aws-containers/lib/ecr.js @@ -0,0 +1,28 @@ +const AWS = require('aws-sdk'); // eslint-disable-line +const _ = require('lodash'); + +module.exports = class ECR { + constructor(region) { + this.ecr = new AWS.ECR({ region }); + } + + /** + * Returns the instance of the ECR class from the SDK + * @returns {AWS.ECR|ECR} + */ + get sdk() { + return this.ecr; + } + + /** + * Fetch the URI of the ECR repository off. 
+ * @param respositoryName + * @returns {Promise.} + */ + getEcrRepositoryURI(respositoryName) { + return this.ecr.describeRepositories({ + repositoryNames: [respositoryName] + }).promise() + .then(result => _(result.repositories).first().repositoryUri); + } +}; diff --git a/generators/aws-containers/lib/ssm.js b/generators/aws-containers/lib/ssm.js new file mode 100644 index 000000000000..d5667e87d513 --- /dev/null +++ b/generators/aws-containers/lib/ssm.js @@ -0,0 +1,42 @@ +const AWS = require('aws-sdk'); // eslint-disable-line +const spinner = require('./utils').spinner; +const _ = require('lodash'); + +module.exports = class AwSSM { + constructor(region) { + this.ssm = new AWS.SSM({ region }); + } + + /** + * Returns an AWS SSM managed parameter. If no property can be found, an undefined value is returned + * @param parameterName Name of the parameter to retrieve + */ + getSSMParameter(parameterName) { + return spinner(this.ssm.getParameters({ + Names: [parameterName], + WithDecryption: true + }).promise() + .then(resultParams => _.get(resultParams, 'Parameters[0].Value'))); + } + + /** + * Sets an AWS SSM Managed parameter + * @param parameterName + * The name of the parameter to set + * @param parameterValue + * The value of the parameter + * @param description + * A description to associate with the parameter + * @param parameterType + * The type of parameter, either "String", "StringList" or "SecureString" + */ + setSSMParameter(parameterName, parameterValue, description = '', parameterType = 'SecureString') { + return spinner(this.ssm.putParameter({ + Name: parameterName, + Type: parameterType, + Description: description, + Value: parameterValue, + Overwrite: true + }).promise()); + } +}; diff --git a/generators/aws-containers/lib/utils.js b/generators/aws-containers/lib/utils.js new file mode 100644 index 000000000000..7b36b4ea212c --- /dev/null +++ b/generators/aws-containers/lib/utils.js @@ -0,0 +1,33 @@ +const _ = require('lodash'); // 
eslint-disable-line +const ora = require('ora'); // eslint-disable-line + +/** + * Wraps the promise in a CLI spinner + * @param promise + * @param text + * @param spinnerIcon + */ +function spinner(promise, text = 'loading', spinnerIcon = 'monkey') { + const spinner = ora({ spinner: spinnerIcon, text }).start(); + return new Promise((resolve, reject) => { + promise.then((resolved) => { + spinner.stop(); + resolve(resolved); + }).catch((err) => { + spinner.stop(); + reject(err); + }); + }); +} + +function formatRDSUsername(username) { + return _.chain(username) + .replace('_', '') + .truncate({ length: 16, omission: '' }) + .value(); +} + +module.exports = { + spinner, + formatRDSUsername +}; diff --git a/generators/aws-containers/prompts.js b/generators/aws-containers/prompts.js new file mode 100644 index 000000000000..3303be32ba78 --- /dev/null +++ b/generators/aws-containers/prompts.js @@ -0,0 +1,392 @@ +const _ = require('lodash'); +const chalk = require('chalk'); + +const AURORA_DB_PASSORD_REGEX = /^[^@"\/]{8,42}$/; // eslint-disable-line +const PERF_TO_CONFIG = { + low: { + fargate: { + taskCount: 1, + CPU: '1024', + memory: '2GB' + }, + database: { + instances: 1, + size: 'db.t2.small' + } + }, + medium: { + fargate: { + taskCount: 2, + CPU: '2048', + memory: '4GB' + }, + database: { + instances: 1, + size: 'db.t2.medium' + } + }, + high: { + fargate: { + taskCount: 4, + CPU: '4096', + memory: '16GB' + }, + database: { + instances: 2, + size: 'db.r4.large' + } + } +}; + +let regionList; + +/* + * All the prompting is made through Inquirer.js. Documentation here: + * https://github.com/SBoudrias/Inquirer.js/ + * Use the dummy question as an example. 
+ */ +module.exports = { + setRegionList, + askTypeOfApplication, + askRegion, + askCloudFormation, + askPerformances, + askVPC, + askForDBPasswords, + askForSubnets, + askDeployNow +}; + +function setRegionList(regions) { + regionList = regions; +} + +function _getFriendlyNameFromTag(awsObject) { + return _.get(_(awsObject.Tags).find({ Key: 'Name' }), 'Value'); +} + +/** + * Ask user what type of application is to be created? + */ +function askTypeOfApplication() { + if (this.abort) return null; + const done = this.async(); + + const prompts = [{ + type: 'list', + name: 'applicationType', + message: 'Which *type* of application would you like to deploy?', + choices: [ + { + value: 'monolith', + name: 'Monolithic application' + }, + { + value: 'microservice', + name: 'Microservice application' + } + ], + default: 'monolith' + }]; + + return this.prompt(prompts).then((props) => { + const applicationType = props.applicationType; + this.composeApplicationType = props.applicationType; + if (applicationType) { + this.log(applicationType); + done(); + } else { + this.abort = true; + done(); + } + }); +} + + +/** + * Ask user what type of Region is to be created? + */ +function askRegion() { + if (this.abort) return null; + const done = this.async(); + const prompts = [ + { + type: 'list', + name: 'region', + message: 'Which region?', + choices: regionList, + default: (this.aws.region) ? _.indexOf(regionList, this.aws.region) : this.awsFacts.defaultRegion + } + ]; + + return this.prompt(prompts).then((props) => { + const region = props.region; + if (region) { + this.aws.region = region; + done(); + } else { + this.abort = true; + done(); + } + }); +} + +/** + * Ask user for CloudFormation name. + */ +function askCloudFormation() { + if (this.abort) return null; + const done = this.async(); + const prompts = [ + { + type: 'input', + name: 'cloudFormationName', + message: 'Please enter your stack\'s name. 
(must be unique within a region)', + default: this.aws.cloudFormationName || this.baseName, + validate: (input) => { + if (input) { + return true; + } + + return 'Stack\'s name cannot be empty!'; + } + } + ]; + + return this.prompt(prompts).then((props) => { + const cloudFormationName = props.cloudFormationName; + if (cloudFormationName) { + this.aws.cloudFormationName = cloudFormationName; + while (this.aws.cloudFormationName.includes('_')) { + this.aws.cloudFormationName = _.replace(this.aws.cloudFormationName, '_', ''); + } + this.log(`CloudFormation Stack name will be ${this.aws.cloudFormationName}`); + done(); + } else { + this.abort = true; + done(); + } + }); +} + + +/** + * As user to select AWS performance. + */ +function askPerformances() { + if (this.abort) return null; + const done = this.async(); + const chainPromises = (index) => { + if (index === this.appConfigs.length) { + done(); + return null; + } + const config = this.appConfigs[index]; + const awsConfig = this.aws.apps.find(a => a.baseName === config.baseName) || { baseName: config.baseName }; + return promptPerformance.call(this, config, awsConfig).then((performance) => { + awsConfig.performance = performance; + awsConfig.fargate = PERF_TO_CONFIG[performance].fargate; + awsConfig.database = PERF_TO_CONFIG[performance].database; + + _.remove(this.aws.apps, a => _.isEqual(a, awsConfig)); + this.aws.apps.push(awsConfig); + return chainPromises(index + 1); + }); + }; + + return chainPromises(0); +} + +function promptPerformance(config, awsConfig = { performance: 'low' }) { + if (this.abort) return null; + + const performanceLevels = _.keys(PERF_TO_CONFIG) + .map((key) => { + const perf = PERF_TO_CONFIG[key]; + return { + name: `${_.startCase(key)} Performance \t ${chalk.green(`Task: ${perf.fargate.CPU} CPU Units, ${perf.fargate.memory} Ram, Count: ${perf.fargate.taskCount}`)}\t ${chalk.yellow(`DB: ${perf.database.instances} Instance, Size: ${perf.database.size}`)}`, + value: key, + short: key + }; 
+ }); + + const prompts = [ + { + type: 'list', + name: 'performance', + message: `${chalk.red(config.baseName)} Please select your performance.`, + choices: performanceLevels, + default: awsConfig.performance, + validate: (input) => { + if (!input) { + return 'You Must choose at least one performance!'; + } + if (input !== 'high' && config.prodDatabaseType === 'postgresql') { + return 'Aurora DB for postgresql is limited to the high performance configuration'; + } + return true; + } + } + ]; + + return this.prompt(prompts).then((props) => { + const performance = props.performance; + return performance; + }); +} + +/** + * Ask user to select target Virtual Private Network + */ +function askVPC() { + if (this.abort) return null; + const done = this.async(); + + const vpcList = this.awsFacts.availableVpcs.map((vpc) => { + const friendlyName = _getFriendlyNameFromTag(vpc); + return { + name: `ID: ${vpc.VpcId} (${friendlyName ? `name: '${friendlyName}', ` : ''}default: ${vpc.IsDefault}, state: ${vpc.State})`, + value: vpc.VpcId, + short: vpc.VpcId + }; + }); + + const prompts = [ + { + type: 'list', + name: 'targetVPC', + message: 'Please select your target Virtual Private Network.', + choices: vpcList, + default: this.aws.vpc.id + } + ]; + + return this.prompt(prompts).then((props) => { + const targetVPC = props.targetVPC; + if (targetVPC) { + this.aws.vpc.id = targetVPC; + this.aws.vpc.cidr = _.find(this.awsFacts.availableVpcs, ['VpcId', targetVPC]).CidrBlock; + done(); + } else { + this.abort = true; + done(); + } + }); +} + +/** + * Ask user to select availability information (availability, zones)/ + */ +function askForSubnets() { + if (this.abort) return null; + const done = this.async(); + + const subnetList = _.map(this.awsFacts.availableSubnets, (sn) => { + const friendlyName = _getFriendlyNameFromTag(sn); + const formattedFriendlyName = friendlyName ? 
`name: '${friendlyName}', ` : ''; + return { + name: `${sn.SubnetId} (${formattedFriendlyName}Availability Zone: ${sn.AvailabilityZone}, Public IP On Launch: ${sn.MapPublicIpOnLaunch ? 'yes' : 'no'})`, + value: sn.SubnetId, + short: sn.SubnetId + }; + }); + + const defaultSubnetValue = storedSubnetValue => storedSubnetValue || [_.get(this.awsFacts.availableSubnets, '[0].SubnetId'), _.get(this.awsFacts.availableSubnets, '[1].SubnetId')]; + const validateSubnet = input => (_.isEmpty(input) || (_.isArray(input) && input.length < 2) ? 'You must select two or more subnets' : true); + + const prompts = [ + { + type: 'checkbox', + name: 'elbSubnets', + message: `Which subnets should we deploy the ${chalk.yellow('Network Load Balancer (ELB)')} to?`, + choices: subnetList, + default: defaultSubnetValue(this.aws.vpc.elbSubnets), + validate: validateSubnet + }, + { + type: 'checkbox', + name: 'appSubnets', + message: `Which subnets should we deploy the ${chalk.yellow('Application & Database')} to?`, + choices: subnetList, + default: defaultSubnetValue(this.aws.vpc.appSubnets), + validate: validateSubnet + }, + ]; + + return this.prompt(prompts).then((props) => { + const publicIpOnLaunchArray = appSubnets => + _.chain(this.awsFacts.availableSubnets) + .filter(availableSubnet => _.includes(appSubnets, availableSubnet.SubnetId)) + .map('MapPublicIpOnLaunch') + .uniq() + .value(); + + const uniqueIPLaunch = publicIpOnLaunchArray(props.appSubnets); + const shouldAppHavePublicIP = _.head(uniqueIPLaunch); + if (uniqueIPLaunch.length !== 1) { + this.log.ok(`⚠️ Mix of Application Subnets containing contradictory 'MapPublic Ip On Launch' values. Defaulting to '${shouldAppHavePublicIP ? 
'yes' : 'no'}'`); + } + + this.aws.vpc.elbSubnets = props.elbSubnets; + this.aws.vpc.appSubnets = props.appSubnets; + this.aws.vpc.appSubnetsLaunchWithPublicIP = shouldAppHavePublicIP; + done(); + }); +} + +function askForDBPasswords() { + if (this.abort) return null; + const done = this.async(); + const chainPromises = (index) => { + if (index === this.appConfigs.length) { + done(); + return null; + } + const config = this.appConfigs[index]; + const appConfig = this.awsFacts.apps.find(a => a.baseName === config.baseName) || { baseName: config.baseName }; + return promptDBPassword.call(this, appConfig).then((password) => { + appConfig.database_Password = password; + _.remove(this.awsFacts.apps, a => _.isEqual(a, appConfig)); + this.awsFacts.apps.push(appConfig); + return chainPromises(index + 1); + }); + }; + + return chainPromises(0); +} + +function promptDBPassword(config) { + if (config.database_Password_InSSM) return new Promise(resolve => resolve(config.database_Password)); + + const prompts = [ + { + type: 'password', + name: 'database_Password', + message: `${chalk.red(config.baseName)} Please enter the password for the database. ${chalk.yellow('This value will be stored within Amazon SSM, and not within .yo-rc.json')}`, + validate: input => ((_.isEmpty(input) || !input.match(AURORA_DB_PASSORD_REGEX)) ? 'Password must be between 8 - 42 characters, and not contain an """, "/" or "@"' : true) + } + ]; + + return this.prompt(prompts).then(props => props.database_Password); +} + +/** + * Ask user if they would like to deploy now? 
+ */ +function askDeployNow() { + if (this.abort) return null; + const done = this.async(); + const prompts = [ + { + type: 'confirm', + name: 'deployNow', + message: 'Would you like to deploy now?.', + default: true + } + ]; + + return this.prompt(prompts).then((props) => { + this.deployNow = props.deployNow; + done(); + }); +} diff --git a/generators/aws-containers/templates/_AwsSSMConfiguration.java b/generators/aws-containers/templates/_AwsSSMConfiguration.java new file mode 100644 index 000000000000..24a9d8158ad3 --- /dev/null +++ b/generators/aws-containers/templates/_AwsSSMConfiguration.java @@ -0,0 +1,74 @@ +<%# + Copyright 2013-2018 the original author or authors from the JHipster project. + + This file is part of the JHipster project,see http://www.jhipster.tech/ + for more information. + + Licensed under the Apache License,Version2.0(the"License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing,software + distributed under the License is distributed on an"AS IS"BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ -%> +package <%=temp.packageName%>.bootstrap; + +import com.amazonaws.services.simplesystemsmanagement.AWSSimpleSystemsManagement; +import com.amazonaws.services.simplesystemsmanagement.AWSSimpleSystemsManagementClientBuilder; +import com.amazonaws.services.simplesystemsmanagement.model.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.cloud.bootstrap.config.PropertySourceLocator; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Profile; +import org.springframework.core.env.Environment; +import org.springframework.core.env.MapPropertySource; +import org.springframework.core.env.PropertySource; + +import java.util.HashMap; +import java.util.Map; + +<%# + Based off the following AWS SSM demonstration code: https://github.com/wcurrie/aws-ssm-demo/blob/master/src/main/java/com/example/config/AwsSsmConfig.java +-%> +@Configuration +// TODO: To be replaced with a constant once change has been made to https://github.com/jhipster/jhipster +@Profile("aws") +public class AwsSSMConfiguration implements PropertySourceLocator { + private final Logger log = LoggerFactory.getLogger(AwsSSMConfiguration.class); + + @Value("${spring.application.name}") + private String applicationName; + + @Value("${cloud.aws.stack.name}") + private String stackName; + + @Override + public PropertySource locate(Environment environment) { + log.info("Retrieving configuration from AWS Simple Systems Management(SSM)"); + AWSSimpleSystemsManagement awsClient = AWSSimpleSystemsManagementClientBuilder.defaultClient(); + + final String pathPrefix = String.format("/%s/%s/",stackName, applicationName); + + GetParametersByPathResult getParameterResults = awsClient.getParametersByPath(new GetParametersByPathRequest() + .withPath(pathPrefix) + .withRecursive(true) + .withWithDecryption(true)); + Map config = new HashMap<>(); + + 
getParameterResults.getParameters() + .forEach(parameter -> { + final String paramName = parameter.getName().replaceFirst(pathPrefix, ""); + log.debug("Found parameter within SSM : {}", paramName); + config.put(paramName, parameter.getValue()); + }); + + return new MapPropertySource(this.getClass().getCanonicalName(), config); + } +} diff --git a/generators/aws-containers/templates/_application.template.yml b/generators/aws-containers/templates/_application.template.yml new file mode 100644 index 000000000000..3433d460c987 --- /dev/null +++ b/generators/aws-containers/templates/_application.template.yml @@ -0,0 +1,308 @@ +AWSTemplateFormatVersion: 2010-09-09 +Parameters: + shouldDeployService: + Type: String + Description: Should the ECS service be initialised + AllowedValues: + - true + - false + Default: false + parentStackName: + Type: String + repositoryName: + Type: String + Description: Name of the Container Repository to Create + databaseName: + Type: String + Description: Name of the database to use + Default: monolithDB + fargateCPU: + Type: String + Description: Fargate CPU units + AllowedValues: + - 256 + - 512 + - 1024 + - 2048 + - 4096 + Default: '<%= app.fargate.CPU %>' + fargateMemory: + Type: String + Description: Amount of memory available for Fargate + AllowedPattern: (512MB)|([1-3]?\dGB) + Default: <%= app.fargate.memory %> + vpcID: + Type: String + Description: VPC Target + Default: <%= aws.vpc.id %> + elbSubnets: + Type: CommaDelimitedList + Description: List of subnets to use + Default: <%= aws.vpc.elbSubnets.join(', ') %> + appSubnets: + Type: CommaDelimitedList + Description: List of subnets to use + Default: <%= aws.vpc.appSubnets.join(', ') %> + AppDeployUsingPublicIP: + Type: String + Description: Should the Application be Deployed with a Public IP + Default: <%= aws.vpc.appSubnetsLaunchWithPublicIP ? 
'ENABLED' : 'DISABLED' %> + vpcCIDR: + Type: String + Description: CIDR IPv4 range for the VPC + Default: <%= aws.vpc.cidr %> + TaskCount: + Type: Number + Description: Desired task count + Default: <%= app.fargate.taskCount %> + databaseSize: + Type: String + Description: Size of the Aurora Instance + Default: <%= app.database.size %> + databaseUsername: + Type: String + Description: Database username + Default: <%= app.baseName %> + databasePassword: + Type: String + Description: Database password + NoEcho: true +Conditions: + DeployService: !Equals [ !Ref shouldDeployService, 'true' ] +Resources: + jhipsterRunRole: + Type: 'AWS::IAM::Role' + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - ecs-tasks.amazonaws.com + Action: + - 'sts:AssumeRole' + Path: / + jhipsterRunPolicy: + Type: "AWS::IAM::Policy" + DependsOn: + - jhipsterRunRole + - JHipsterContainerRegistry + - JHipsterLogGroup + Properties: + PolicyName: "jhipsterRunRole" + PolicyDocument: + Version: "2012-10-17" + Statement: + # ECR Permissions + - + Effect: "Allow" + Action: + - "ecr:GetAuthorizationToken" + Resource: '*' + - + Effect: "Allow" + Action: + - "ecr:BatchCheckLayerAvailability" + - "ecr:GetDownloadUrlForLayer" + - "ecr:GetRepositoryPolicy" + - "ecr:DescribeRepositories" + - "ecr:ListImages" + - "ecr:DescribeImages" + - "ecr:BatchGetImage" + Resource: !GetAtt JHipsterContainerRegistry.Arn + # LogStream Persmissions + - + Effect: "Allow" + Action: + - "logs:PutLogEvents" + - "logs:CreateLogStream" + Resource: !GetAtt JHipsterLogGroup.Arn + - Effect: "Allow" + Action: + - "ssm:GetParameter" + - "ssm:GetParametersByPath" + Resource: !Join ['', ['arn:aws:ssm:',!Ref 'AWS::Region',':', !Ref 'AWS::AccountId', ':parameter/', !Ref parentStackName,'/*' ]] + Roles: + - !Ref jhipsterRunRole + JHipsterInternalSG: + Type: 'AWS::EC2::SecurityGroup' + Properties: + GroupDescription: Internal JHipster Container Security Group + 
SecurityGroupIngress: + - IpProtocol: tcp + FromPort: '8080' + ToPort: '8080' + CidrIp: !Ref vpcCIDR + VpcId: !Ref vpcID + JHipsterDatabaseSG: + Type: 'AWS::EC2::SecurityGroup' + DependsOn: + - JHipsterInternalSG + Properties: + GroupDescription: Internal JHIpster Database Secuirty Group + SecurityGroupIngress: + - IpProtocol: icmp + FromPort: -1 + ToPort: -1 + SourceSecurityGroupId: !GetAtt JHipsterInternalSG.GroupId + - IpProtocol: tcp + FromPort: 0 + ToPort: 65535 + SourceSecurityGroupId: !GetAtt JHipsterInternalSG.GroupId + - IpProtocol: udp + FromPort: 0 + ToPort: 65535 + SourceSecurityGroupId: !GetAtt JHipsterInternalSG.GroupId + VpcId: !Ref vpcID + JHipsterContainerRegistry: + Type: 'AWS::ECR::Repository' + Properties: + RepositoryName: !Ref repositoryName + JHipsterLogGroup: + Type: 'AWS::Logs::LogGroup' + Properties: + RetentionInDays: 7 +# Database + JHipsterDBSubnetGroup: + Type: AWS::RDS::DBSubnetGroup + Properties: + DBSubnetGroupDescription: "JHipster Aurora DB Cluster" + SubnetIds: !Ref appSubnets + JHipsterRDSCluster: + Type: AWS::RDS::DBCluster + DependsOn: + - JHipsterDatabaseSG + - JHipsterRDSClusterParamGroup + DeletionPolicy: Delete + Properties: + DatabaseName: !Ref databaseName + MasterUsername: !Ref databaseUsername + MasterUserPassword: !Ref databasePassword + DBClusterParameterGroupName: !Ref JHipsterRDSClusterParamGroup + VpcSecurityGroupIds: + - !GetAtt JHipsterDatabaseSG.GroupId + # need to toggle between aurora, aurora-postgresql + Engine: <%= app.auroraEngine %> + DBSubnetGroupName: + Ref: JHipsterDBSubnetGroup + JHipsterRDSClusterParamGroup: + Type: AWS::RDS::DBClusterParameterGroup + Properties: + Description: JHipster Default Cluster Param Group + Family: <%= app.auroraFamily %> + Parameters: + <%= app.auroraClusterParam %> +<% for (inst = 1; inst <= app.database.instances; inst++) { %> + JHipsterRDSDBInstance<%= inst %>: + Type: AWS::RDS::DBInstance + DeletionPolicy: Delete + Properties: + DBSubnetGroupName: !Ref 
JHipsterDBSubnetGroup + DBParameterGroupName: !Ref JHipsterRDSDBParameterGroup + Engine: <%= app.auroraEngine %> + DBClusterIdentifier: !Ref JHipsterRDSCluster + DBInstanceClass: !Ref databaseSize<%} %> + JHipsterRDSDBParameterGroup: + Type: AWS::RDS::DBParameterGroup + Properties: + Description: CloudFormation Sample Aurora Parameter Group + Family: <%= app.auroraFamily %> + Parameters: + <%= app.auroraDbParam %> + JHipsterMonoTask: + Type: 'AWS::ECS::TaskDefinition' + DependsOn: + - JHipsterCluster + - jhipsterRunRole + Properties: + Cpu: !Ref fargateCPU + Memory: !Ref fargateMemory + RequiresCompatibilities: + - FARGATE + TaskRoleArn: !Join ['', ['arn:aws:iam::', !Ref 'AWS::AccountId', ':role/', !Ref jhipsterRunRole ]] + #This role is required by Fargate tasks to pull container images and publish container logs to Amazon CloudWatch on your behalf. + ExecutionRoleArn: !Join ['', ['arn:aws:iam::', !Ref 'AWS::AccountId', ':role/', !Ref jhipsterRunRole ]] + NetworkMode: awsvpc + ContainerDefinitions: + - Name: JHipsterMonolith + Image: !Join [ '', [ !Ref 'AWS::AccountId','.dkr.ecr.',!Ref 'AWS::Region','.amazonaws.com/', !Ref repositoryName ] ] + Cpu: !Ref fargateCPU + PortMappings: + - ContainerPort: 8080 + LogConfiguration: + LogDriver: awslogs + Options: + awslogs-group: !Ref JHipsterLogGroup + awslogs-region: !Ref 'AWS::Region' + awslogs-stream-prefix: awslogs-jhipster + Environment: + - Name: JAVA_OPTS + Value: '-Djava.net.preferIPv4Stack=true -Djava.net.preferIPv4Addresses' + - Name: SPRING_PROFILES_ACTIVE + Value: 'prod,swagger,aws' + - Name: SPRING_DATASOURCE_URL + Value: !Join ['',['jdbc:', <%= app.dbType %> ,'://',!GetAtt JHipsterRDSCluster.Endpoint.Address ,':', !GetAtt JHipsterRDSCluster.Endpoint.Port,'/', !Ref databaseName]] + - Name: SPRING_DATASOURCE_USERNAME + Value: !Ref databaseUsername +# - Name: SPRING_DATASOURCE_PASSWORD +# Value: !Ref databasePassword + - Name: JHIPSTER_SLEEP + Value: '0' + - Name: CLOUD_AWS_STACK_NAME + Value: !Ref 
parentStackName + MemoryReservation: '1024' + Essential: 'true' + JHipsterCluster: + Type: 'AWS::ECS::Cluster' + Properties: {} + JHipsterLoadBalancer: + Type: 'AWS::ElasticLoadBalancingV2::LoadBalancer' + Properties: + Scheme: internet-facing + Type: network + Subnets: !Ref elbSubnets + JHIpsterALBTargetGroup: + Type: 'AWS::ElasticLoadBalancingV2::TargetGroup' + Properties: + Port: 80 + Protocol: TCP + TargetType: ip + VpcId: !Ref vpcID + HealthCheckIntervalSeconds: 30 + JHIpsterALBListener: + Type: 'AWS::ElasticLoadBalancingV2::Listener' + Properties: + DefaultActions: + - Type: forward + TargetGroupArn: !Ref JHIpsterALBTargetGroup + LoadBalancerArn: !Ref JHipsterLoadBalancer + Port: '80' + Protocol: TCP + JHipsterAppService: + Type: 'AWS::ECS::Service' + DependsOn: + - JHIpsterALBListener + - JHIpsterALBTargetGroup + - jhipsterRunRole + Condition: DeployService + Properties: + Cluster: !Ref JHipsterCluster + DesiredCount: !Ref TaskCount + LaunchType: FARGATE + NetworkConfiguration: + AwsvpcConfiguration: + AssignPublicIp: !Ref AppDeployUsingPublicIP + SecurityGroups: + - !GetAtt JHipsterInternalSG.GroupId + - !GetAtt JHipsterDatabaseSG.GroupId + Subnets: !Ref appSubnets + LoadBalancers: + - ContainerName: JHipsterMonolith + ContainerPort: 8080 + TargetGroupArn: !Ref JHIpsterALBTargetGroup + TaskDefinition: !Ref JHipsterMonoTask +Outputs: + LoadBalancerOutput: + Description: The external DNS address of the load-balanancer + Value: !GetAtt JHipsterLoadBalancer.DNSName diff --git a/generators/aws-containers/templates/_base.template.yml b/generators/aws-containers/templates/_base.template.yml new file mode 100644 index 000000000000..e7f6bc576317 --- /dev/null +++ b/generators/aws-containers/templates/_base.template.yml @@ -0,0 +1,35 @@ +AWSTemplateFormatVersion: 2010-09-09 +Parameters: + applicationStackS3Bucket: + Type: String + Description: Name of the S3 bucket containing the application.template.yml template file + Default: <%= aws.s3BucketName %> +<% for (app 
of appConfigs) { %> <%= app.baseName %>DBPassword: + Type: String + Description: Aurora master database password for <%= app.baseName %> + NoEcho: true +<%} %> + shouldDeployService: + Type: String + Description: Should the ECS service be initialised + AllowedValues: + - true + - false + Default: false +Resources: +<% for (app of appConfigs) { %> <%= app.baseName %>: + Type: 'AWS::CloudFormation::Stack' + Properties: + Parameters: + parentStackName: !Ref AWS::StackName + repositoryName: <%= app.baseName %> + shouldDeployService: !Ref shouldDeployService + databasePassword: !Ref <%= app.baseName %>DBPassword + TemplateURL: !Join [ '', [ 'https://s3.amazonaws.com/',!Ref applicationStackS3Bucket, '/<%= app.baseName %>.template.yml'] ] + TimeoutInMinutes: 5 +<%} %> +Outputs: +<% for (app of appConfigs) { %> LoadBalancerOutput<%= app.baseName %>: + Description: The external DNS address of the load-balanancers + Value: !GetAtt <%= app.baseName %>.Outputs.LoadBalancerOutput +<%} %> diff --git a/generators/aws-containers/templates/_bootstrap-aws.yml b/generators/aws-containers/templates/_bootstrap-aws.yml new file mode 100644 index 000000000000..906818245ceb --- /dev/null +++ b/generators/aws-containers/templates/_bootstrap-aws.yml @@ -0,0 +1,16 @@ +# =================================================================== +# Spring Cloud Config bootstrap configuration for the "aws" profile +# =================================================================== + +spring: + application: + name: <%= temp.baseName %> + cloud: + config: + fail-fast: true + +cloud: + aws: + stack: + name: <%= temp.baseName %> + auto: false diff --git a/generators/aws-containers/templates/_spring.factories b/generators/aws-containers/templates/_spring.factories new file mode 100644 index 000000000000..d8cbe27b6654 --- /dev/null +++ b/generators/aws-containers/templates/_spring.factories @@ -0,0 +1 @@ 
+org.springframework.cloud.bootstrap.BootstrapConfiguration=<%=temp.packageName%>.bootstrap.AwsSSMConfiguration diff --git a/generators/docker-base.js b/generators/docker-base.js index d3cad22bbce2..2a842ac84f46 100644 --- a/generators/docker-base.js +++ b/generators/docker-base.js @@ -1,4 +1,3 @@ - /** * Copyright 2013-2018 the original author or authors from the JHipster project. * @@ -20,7 +19,7 @@ const shelljs = require('shelljs'); const chalk = require('chalk'); const crypto = require('crypto'); - +const dockerUtils = require('./docker-utils'); /** * This is the Generator base class. * This provides all the public API methods exposed via the module system. @@ -29,7 +28,7 @@ const crypto = require('crypto'); * The method signatures in public API should not be changed without a major version change */ module.exports = { - checkDocker, + checkDocker: dockerUtils.checkDocker, checkImages, generateJwtSecret, configureImageNames, diff --git a/generators/docker-cli.js b/generators/docker-cli.js new file mode 100644 index 000000000000..4000f142dd66 --- /dev/null +++ b/generators/docker-cli.js @@ -0,0 +1,122 @@ +const _ = require('lodash'); +const exec = require('child_process').exec; + +/** + * This is the DockerCli object. it allows Yeoman to interact with Docker via Docker CLI. + * NB: It is vital that these functions are bound to the generator context. + */ +module.exports = { + setOutputs, + command, + getImageID, + tagImage, + loginToAws, + pushImage +}; + +let stdOut = data => console.log(data.toString().trim()); +let stdErr = data => console.error(data.toString().trim()); +function setOutputs(stdout, stderr) { + stdOut = stdout; + stdErr = stderr; +} + +/** + * Execute the shell command given as a parameter and execute the callback at the end. Callback has profile: + * `function(err, stdout, stderr)` + * @param cmd the command to execute + * @param cb the callback that will be called after the function is executed. 
+ * @param opts additional options + * @attr silent flag to deactivate the live stderr and stdout. Default to false + * @attr maxBuffer value of the buffer to store the live outputs. Default to 10240000 + */ +function command(cmd, cb, opts = {}) { + const options = Object.assign( + {}, + { + silent: false, + maxBuffer: 10240000 + }, + opts + ); + const command = exec(`${cmd}`, { maxBuffer: options.maxBuffer }, cb); + + if (!options.silent) { + command.stdout.on('data', stdOut); + command.stderr.on('data', stdErr); + } +} + +/** + * + * @param imageName the image name + * @param tag the image tag (optional) + * @returns {Promise} returns the image ID on success, an error message on failure (exception or noId). + */ +function getImageID(imageName, tag) { + const dockerNameTag = `${imageName}${_.isNil(tag) ? '' : `:${tag}`}`; + const commandLine = `docker image ls --quiet ${dockerNameTag}`; + + return new Promise((resolve, reject) => + command(commandLine, (err, stdout) => { + if (err) { + reject(err); + } + const dockerID = _.trim(stdout); + if (_.isEmpty(dockerID)) { + reject(new Error(`No Docker ID found for ${dockerNameTag}`)); + } else { + resolve(dockerID); + } + }, { silent: true })); +} + +function tagImage(from, to) { + const commandLine = `docker tag ${from} ${to}`; + + return new Promise((resolve, reject) => + command(commandLine, (err, stdout) => { + if (err) { + reject(err); + } + resolve(stdout); + }, { silent: true })); +} + +/** + * Log docker to AWS. 
+ * @param region + * @param accountId + * @param username + * @param password + * @returns {Promise} — NOTE(review): the `{ silent: true }` object below is passed as a second argument to the Promise constructor (where it is ignored) instead of as the third argument to `command()`, so the `docker login` command line — password included — is echoed to the console; verify and move it into the `command()` call. + */ +function loginToAws(region, accountId, username, password) { + const commandLine = `docker login --username AWS --password ${password} https://${accountId}.dkr.ecr.${region}.amazonaws.com`; + return new Promise( + (resolve, reject) => + command(commandLine, (err, stdout) => { + if (err) { + reject(err); + } + resolve(stdout); + }), + { silent: true } + ); +} + +/** + * Pushes the locally constructed Docker image to the supplied repository + * @param repository tag, for example: 111111111.dkr.ecr.us-east-1.amazonaws.com/sample + * @returns {Promise} + */ +function pushImage(repository) { + const commandLine = `docker push ${repository}`; + return new Promise((resolve, reject) => + command(commandLine, (err, stdout) => { + if (err) { + reject(err); + } + resolve(stdout); + })); +} diff --git a/generators/docker-prompts.js b/generators/docker-prompts.js index 229ae1f61dcb..b3b4e7824e30 100644 --- a/generators/docker-prompts.js +++ b/generators/docker-prompts.js @@ -30,7 +30,8 @@ module.exports = { askForServiceDiscovery, askForAdminPassword, askForDockerRepositoryName, - askForDockerPushCommand + askForDockerPushCommand, + loadConfigs }; /** @@ -144,6 +145,7 @@ function askForPath() { }); } + /** * Ask For Apps */ @@ -165,32 +167,41 @@ function askForApps() { this.prompt(prompts).then((props) => { this.appsFolders = props.chosenApps; - this.appConfigs = []; - this.gatewayNb = 0; - this.monolithicNb = 0; - this.microserviceNb = 0; - - // Loading configs - this.appsFolders.forEach((appFolder) => { - const path = this.destinationPath(`${this.directoryPath + appFolder}/.yo-rc.json`); - const fileData = this.fs.readJSON(path); - const config = fileData['generator-jhipster']; - - if (config.applicationType === 'monolith') { - this.monolithicNb++; - } else if (config.applicationType === 'gateway') { - this.gatewayNb++; - } else if (config.applicationType === 
'microservice') { - this.microserviceNb++; - } - - this.portsToBind = this.monolithicNb + this.gatewayNb; - this.appConfigs.push(config); - }); + loadConfigs.call(this); done(); }); } +/* + * Load config from this.appsFolders + * TODO: Extracted from AskForApps. Move into utils? + */ +function loadConfigs() { + this.appConfigs = []; + this.gatewayNb = 0; + this.monolithicNb = 0; + this.microserviceNb = 0; + + // Loading configs + this.appsFolders.forEach((appFolder) => { + const path = this.destinationPath(`${this.directoryPath + appFolder}/.yo-rc.json`); + const fileData = this.fs.readJSON(path); + const config = fileData['generator-jhipster']; + + if (config.applicationType === 'monolith') { + this.monolithicNb++; + } else if (config.applicationType === 'gateway') { + this.gatewayNb++; + } else if (config.applicationType === 'microservice') { + this.microserviceNb++; + } + + this.portsToBind = this.monolithicNb + this.gatewayNb; + config.appFolder = appFolder; + this.appConfigs.push(config); + }); +} + /** * Ask For Clusters Mode */ @@ -307,7 +318,10 @@ function askForServiceDiscovery() { const serviceDiscoveryEnabledApps = []; this.appConfigs.forEach((appConfig, index) => { if (appConfig.serviceDiscoveryType) { - serviceDiscoveryEnabledApps.push({ baseName: appConfig.baseName, serviceDiscoveryType: appConfig.serviceDiscoveryType }); + serviceDiscoveryEnabledApps.push({ + baseName: appConfig.baseName, + serviceDiscoveryType: appConfig.serviceDiscoveryType + }); } }); diff --git a/generators/docker-utils.js b/generators/docker-utils.js new file mode 100644 index 000000000000..8e447ac476c3 --- /dev/null +++ b/generators/docker-utils.js @@ -0,0 +1,97 @@ +const shelljs = require('shelljs'); +const chalk = require('chalk'); +const dockerCLI = require('./docker-cli'); +/** + * These are Docker utility functions shared by the generators. + * This provides all the public API methods exposed via the module system. + * The public API methods can be directly utilized as well using commonJS require. 
+ * + * The method signatures in public API should not be changed without a major version change + */ +module.exports = { + checkDocker, + checkImageExist, + checkAndBuildImages, +}; + +/** + * Check that Docker exists. NOTE(review): `dockerVersionMajor`/`dockerVersionMinor` are strings from split('.'), so the strict `=== 1` comparison below never matches and the minor-version check is dead code — verify. + * @param failOver flag — NOTE(review): stale; `checkDocker()` takes no parameters. + */ +function checkDocker() { + if (this.abort) return; + const done = this.async(); + + shelljs.exec('docker -v', { silent: true }, (code, stdout, stderr) => { + if (stderr) { + this.log(chalk.red('Docker version 1.10.0 or later is not installed on your computer.\n' + + ' Read http://docs.docker.com/engine/installation/#installation\n')); + this.abort = true; + } else { + const dockerVersion = stdout.split(' ')[2].replace(/,/g, ''); + const dockerVersionMajor = dockerVersion.split('.')[0]; + const dockerVersionMinor = dockerVersion.split('.')[1]; + if (dockerVersionMajor < 1 || (dockerVersionMajor === 1 && dockerVersionMinor < 10)) { + this.log(chalk.red(`${'Docker version 1.10.0 or later is not installed on your computer.\n' + + ' Docker version found: '}${dockerVersion}\n` + + ' Read http://docs.docker.com/engine/installation/#installation\n')); + this.abort = true; + } else { + this.log.ok('Docker is installed'); + } + } + done(); + }); +} + +/** + * Check that a Docker image exists in a JHipster app. + * + * @param opts Options to pass. + * @property cwd JHipster app directory. 
default is './' + * @property appConfig Configuration for the current application — NOTE(review): the body builds the image path from `opts.cwd + opts.cwd` (directory doubled); likely intended `this.directoryPath + opts.cwd` — verify. + */ +function checkImageExist(opts = { cwd: './', appConfig: null }) { + if (this.abort) return; + + let imagePath = ''; + this.warning = false; + this.warningMessage = 'To generate the missing Docker image(s), please run:\n'; + if (opts.appConfig.buildTool === 'maven') { + imagePath = this.destinationPath(`${opts.cwd + opts.cwd}/target/docker`); + this.dockerBuildCommand = './mvnw verify -Pprod dockerfile:build'; + } else { + imagePath = this.destinationPath(`${opts.cwd + opts.cwd}/build/docker`); + this.dockerBuildCommand = './gradlew -Pprod bootRepackage buildDocker'; + } + + if (shelljs.ls(imagePath).length === 0) { + this.warning = true; + this.warningMessage += ` ${chalk.cyan(this.dockerBuildCommand)} in ${this.destinationPath(this.directoryPath + opts.cwd)}\n`; + } +} + +/** + * Check that a Docker image exists (using {@link #checkImageExist}) and if the user agrees, rebuild it. + * @param opts + * @property cwd JHipster app directory. default is './' + * @property forceBuild flag to force the image build. + * @property appConfig Configuration for the current application + * @returns {Promise} + */ +function checkAndBuildImages(opts = { cwd: './', forceBuild: false, appConfig: { buildTool: 'gradle' } }) { + if (this.abort) return null; + checkImageExist.call(this, opts); + const pwd = shelljs.pwd(); + shelljs.cd(opts.cwd); + return new Promise((resolve, reject) => dockerCLI.command(`${opts.cwd}${this.dockerBuildCommand}`, (err) => { + + shelljs.cd(pwd); + if (err) { + this.log.error(chalk.red(`The Docker image build failed. 
${err}`)); + this.abort = true; + reject(); + } + resolve(); + })); +} diff --git a/generators/generator-base.js b/generators/generator-base.js index 727b4302a053..7bd6940a368a 100644 --- a/generators/generator-base.js +++ b/generators/generator-base.js @@ -1257,7 +1257,20 @@ module.exports = class extends PrivateBase { * @param {string} other - (optional) explicit other thing: scope, exclusions... */ addMavenDependency(groupId, artifactId, version, other) { - const fullPath = 'pom.xml'; + this.addMavenDependencyInDirectory('.', groupId, artifactId, version, other); + } + + /** + * Add a new Maven dependency in a specific folder. + * + * @param {string} directory - the folder to add the dependency in + * @param {string} groupId - dependency groupId + * @param {string} artifactId - dependency artifactId + * @param {string} version - (optional) explicit dependency version number + * @param {string} other - (optional) explicit other thing: scope, exclusions... + */ + addMavenDependencyInDirectory(directory, groupId, artifactId, version, other) { + const fullPath = `${directory}/pom.xml`; try { let dependency = `${'\n' + ' '}${groupId}\n` + @@ -1426,7 +1439,19 @@ module.exports = class extends PrivateBase { * @param {string} version - (optional) explicit dependency version number */ addGradleDependency(scope, group, name, version) { - const fullPath = 'build.gradle'; + this.addGradleDependencyInDirectory('.', scope, group, name, version); + } + + /** + * Add a new dependency to the build.gradle file in a specific folder. + * + * @param {string} scope - scope of the new dependency, e.g. 
compile + * @param {string} group - maven GroupId + * @param {string} name - maven ArtifactId + * @param {string} version - (optional) explicit dependency version number + */ + addGradleDependencyInDirectory(directory, scope, group, name, version) { + const fullPath = `${directory}/build.gradle`; let dependency = `${group}:${name}`; if (version) { dependency += `:${version}`; diff --git a/generators/utils.js b/generators/utils.js index 10454a00edbe..7504117dfe68 100644 --- a/generators/utils.js +++ b/generators/utils.js @@ -37,7 +37,8 @@ module.exports = { deepFind, getJavadoc, buildEnumInfo, - copyObjectProps + copyObjectProps, + decodeBase64 }; /** @@ -350,3 +351,12 @@ function buildEnumInfo(field, angularAppName, packageName, clientRootFolder) { function copyObjectProps(toObj, fromObj) { Object.assign(toObj, fromObj); } + +/** + * Decode the given base64-encoded string using the specified character encoding. + * @param string the base64 string to decode + * @param encoding the encoding to decode into. defaults to 'utf-8' + */ +function decodeBase64(string, encoding = 'utf-8') { + return Buffer.from(string, 'base64').toString(encoding); +}