Skip to content

Commit

Permalink
new aws-containers sub-generator
Browse files Browse the repository at this point in the history
New AWS sub-generator which uses Amazon ECS to run JHipster monolithic applications.

Closes jhipster#6773
  • Loading branch information
Gerard Gigliotti committed Jan 22, 2018
1 parent d82f8a2 commit 1931559
Show file tree
Hide file tree
Showing 21 changed files with 2,456 additions and 54 deletions.
3 changes: 3 additions & 0 deletions cli/commands.js
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,9 @@ module.exports = {
aws: {
desc: 'Deploy the current application to Amazon Web Services'
},
'aws-containers': {
desc: 'Deploy the current application to Amazon Web Services using ECS'
},
'ci-cd': {
desc: 'Create pipeline scripts for popular Continuous Integration/Continuous Deployment tools'
},
Expand Down
24 changes: 24 additions & 0 deletions generators/aws-containers/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# AWS JHipster Generator - _A new hope_

## AWS
### ECR
#### Authentication Token
The token received from ECR to authenticate is structured as follows. We'll only take the first object.
```json
{
"authorizationToken":"AUTHORIZATION_TOKEN",
"expiresAt":"2017-12-12T10:52:24.810Z",
"proxyEndpoint":"https://foo.ecr.region.amazonaws.com"
}
```

## Development
### Dependencies

```
# NPM
npm i --save-dev aws-sdk@^2.167.0 [email protected] [email protected]
# Yarn
yarn add --dev aws-sdk@^2.167.0 [email protected] [email protected]
```
5 changes: 5 additions & 0 deletions generators/aws-containers/USAGE
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
Description:
Initializes an AWS app and generates a Docker container that is ready to push to AWS.

Example:
jhipster aws-containers
299 changes: 299 additions & 0 deletions generators/aws-containers/aws-client.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,299 @@
/* global process */
const _ = require('lodash');
const fs = require('fs');
const chalk = require('chalk');
const shelljs = require('shelljs');

const utils = require('../utils');

const AwsSSM = require('./lib/ssm');
const AwsECR = require('./lib/ecr');
const AwsCF = require('./lib/cf');

const DEFAULT_REGION = 'us-east-1';
const S3_MIN_PART_SIZE = 5242880;


// Instance from aws-sdk
let AWS;
let credentials;
let ec2;
// let ecr;
let s3;
let sts;
let ora;

// Instances from ./lib. Composed with aws-sdk
let SSM;
let ECR;
let CF;

let ProgressBar;

// Public API of this module.
// SSM/ECR/CF are exposed through getter functions because the underlying
// instances are only created once initAwsStuff() has been called.
module.exports = {
    DEFAULT_REGION,
    SSM: () => SSM,
    ECR: () => ECR,
    CF: () => CF,
    createS3Bucket,
    getDockerLogin,
    listRegions,
    listSubnets,
    listVpcs,
    loadAWS,
    saveCredentialsInAWS,
    initAwsStuff,
    sanitizeBucketName,
    uploadTemplate,
};

/**
 * Will load the aws-sdk npm dependency if it's not already loaded, installing
 * it first (with yarn or npm, per the project configuration) when necessary.
 *
 * @param generator the yeoman generator it'll be loaded in.
 * @returns {Promise} Resolves once aws-sdk, progress and ora are loaded;
 *                    rejects if the dependencies couldn't be installed.
 */
function loadAWS(generator) {
    return new Promise((resolve, reject) => {
        try {
            AWS = require('aws-sdk') // eslint-disable-line
            ProgressBar = require('progress'); // eslint-disable-line
            ora = require('ora'); // eslint-disable-line
            // Resolve only when the modules are actually loaded. The original
            // code resolved unconditionally, fulfilling the promise before the
            // asynchronous install below had completed.
            resolve();
        } catch (e) {
            generator.log('Installing AWS dependencies');
            let installCommand = 'yarn add aws-sdk@^2.167.0 [email protected] [email protected]';
            if (generator.config.get('clientPackageManager') === 'npm') {
                installCommand = 'npm install aws-sdk@^2.167.0 [email protected] [email protected]';
            }
            shelljs.exec(installCommand, { silent: false }, (code) => {
                if (code !== 0) {
                    generator.error('Something went wrong while installing the dependencies\n');
                    reject();
                    return; // don't fall through and require() after a failed install
                }
                AWS = require('aws-sdk') // eslint-disable-line
                ProgressBar = require('progress'); // eslint-disable-line
                ora = require('ora'); // eslint-disable-line
                resolve();
            });
        }
    });
}

/**
* Init AWS stuff like ECR and whatnot.
*
* @param ecrConfig The config used to instanciate ECR
*/
function initAwsStuff(region = DEFAULT_REGION) {
ec2 = new AWS.EC2({ region });
// ecr = new AWS.ECR({ region });
s3 = new AWS.S3();
sts = new AWS.STS();

SSM = new AwsSSM(region);
ECR = new AwsECR(region);
CF = new AwsCF(region);
}

/**
 * Wraps the promise in a CLI spinner that is shown while the promise is
 * pending and stopped when it settles (fulfilled or rejected).
 *
 * @param promise the promise to decorate.
 * @param text text displayed next to the spinner. Defaults to 'loading'.
 * @param spinnerIcon ora spinner style. Defaults to 'monkey'.
 * @returns {Promise} settles with the same value/reason as `promise`.
 */
function spinner(promise, text = 'loading', spinnerIcon = 'monkey') {
    const activeSpinner = ora({ spinner: spinnerIcon, text }).start();
    // Chain directly on the given promise instead of wrapping it in a
    // redundant `new Promise` (explicit-construction anti-pattern).
    return promise.then(
        (resolved) => {
            activeSpinner.stop();
            return resolved;
        },
        (err) => {
            activeSpinner.stop();
            throw err;
        }
    );
}


/**
 * listRegions() returns a Promise, which resolves to an array of AWS region objects,
 * with "Endpoint" and "RegionName" properties.
 * Uses the EC2 client configured by initAwsStuff().
 *
 * @returns {Promise<EC2.Region[]>}
 */
function listRegions() {
    return spinner(ec2.describeRegions({})
        .promise()
        .then(data => data.Regions));
}

/**
 * listVpcs() returns a Promise, which resolves to an array of AWS VPC objects
 * in the region the EC2 client was configured with (see initAwsStuff()).
 *
 * @returns {Promise<EC2.Vpc[]>}
 */
function listVpcs() {
    return spinner(ec2.describeVpcs({})
        .promise()
        .then(data => data.Vpcs));
}

/**
 * listSubnets(vpcId) returns a Promise, which resolves to an array of
 * subnets that are in the 'available' state within the supplied VPC.
 *
 * @param vpcId of the VPC whose subnets should be listed
 * @returns {Promise<EC2.Subnet[]>}
 */
function listSubnets(vpcId) {
    const subnetFilters = [
        { Name: 'vpc-id', Values: [vpcId] },
        { Name: 'state', Values: ['available'] }
    ];
    const request = ec2.describeSubnets({ Filters: subnetFilters })
        .promise()
        .then(response => response.Subnets);
    return spinner(request);
}

/**
 * Get the credentials from the ~/.aws/credentials file for the given profile
 * and install them as the global AWS SDK credentials.
 *
 * @param profile The AWS profile to get the credentials from. Defaults to 'default'.
 * @returns {Promise} Resolves with no value on success; rejects with the error
 *                    if no credentials are found for the given profile.
 */
function saveCredentialsInAWS(profile = 'default') {
    credentials = new AWS.SharedIniFileCredentials({ profile });
    return new Promise((resolve, reject) => credentials.refresh((err) => {
        if (err) {
            reject(err);
            // Stop here: the original fell through and resolved (and set the
            // global credentials) even when the refresh had failed.
            return;
        }
        AWS.config.credentials = credentials;
        resolve();
    }));
}

/**
 * Retrieve decoded information to authenticate to Docker with AWS credentials.
 * The ECR authorization token decodes to "username:password"; the account id
 * comes from STS GetCallerIdentity.
 *
 * @returns {Promise} Resolves to { username, password, accountId }; rejects
 *                    if the token or caller identity can't be retrieved.
 */
function getDockerLogin() {
    // Chain on the existing promises instead of wrapping them in a redundant
    // `new Promise`. This also ensures a rejection from
    // _getAuthorizationToken() rejects this promise (the original left it
    // unhandled, so the returned promise never settled in that case).
    return spinner(
        _getAuthorizationToken()
            .then(authToken => sts.getCallerIdentity({}).promise()
                .then((data) => {
                    const decoded = utils.decodeBase64(authToken.authorizationToken);
                    const splitResult = _.split(decoded, ':');
                    return {
                        username: splitResult[0],
                        password: splitResult[1],
                        accountId: data.Account
                    };
                }))
            .catch(() => {
                throw new Error('Couldn\'t retrieve the user information');
            })
    );
}

/**
 * Fetch the authentication token from AWS ECR used to authenticate with Docker.
 *
 * @returns {Promise} Resolves to the first authorizationData entry; rejects
 *                    when ECR returns no authorization data.
 * @private
 */
function _getAuthorizationToken() {
    // Chain directly on the SDK promise instead of wrapping it in a
    // redundant `new Promise` (explicit-construction anti-pattern).
    return spinner(
        ECR.sdk.getAuthorizationToken({}).promise()
            .then((data) => {
                if (!_.has(data, 'authorizationData.0')) {
                    throw new Error('No authorization data found.');
                }
                return data.authorizationData[0];
            })
    );
}

/**
 * Create an S3 Bucket with the given name in the given region.
 * First checks (via headBucket) that the name isn't already owned by
 * someone else; 'NotFound' is the expected error for a fresh name.
 *
 * @param bucketName the name of the bucket to create.
 * @param region the region to create the bucket in. Defaults to DEFAULT_REGION.
 * @returns {Promise} Resolves with the createBucket result; rejects if the
 *                    name is taken by another account or creation fails.
 */
function createS3Bucket(bucketName, region = DEFAULT_REGION) {
    const createBucketParams = {
        Bucket: bucketName
    };
    return spinner(
        s3.headBucket({ Bucket: bucketName }).promise()
            .catch((error) => {
                if (error.code !== 'NotFound') {
                    // Throw to abort the chain; the original called reject()
                    // but still fell through and attempted createBucket.
                    throw new Error(`The S3 Bucket ${chalk.bold(bucketName)} in region ${chalk.bold(region)} already exists and you don't have access to it. Error code: ${chalk.bold(error.code)}`);
                }
            })
            .then(() => s3.createBucket(createBucketParams).promise()
                .catch(() => {
                    throw new Error(`There was an error during the creation of the S3 Bucket ${chalk.bold(bucketName)} in region ${chalk.bold(region)}`);
                }))
    );
}

/**
 * Upload the template into the S3 Bucket, showing a progress bar in the CLI.
 *
 * @param bucketName S3 Bucket name to upload the template into
 * @param filename Name to give to the file in the Bucket
 * @param path Path to the file
 * @returns {Promise} Resolves with the S3 upload result; rejects if the file
 *                    is missing or the upload fails.
 */
function uploadTemplate(bucketName, filename, path) {
    return spinner(new Promise((resolve, reject) =>
        fs.stat(path, (error, stats) => {
            if (error || _.isNil(stats)) {
                reject(new Error(`File ${chalk.bold(path)} not found`));
                // Stop here: the original fell through and crashed with a
                // TypeError reading stats.size when the file was missing.
                return;
            }
            const upload = s3.upload(
                {
                    Bucket: bucketName,
                    Key: filename,
                    Body: fs.createReadStream(path)
                },
                {
                    // partSize must cover the whole file (single part), but
                    // can't go below the S3 minimum part size of 5 MB.
                    partSize: Math.max(stats.size, S3_MIN_PART_SIZE),
                    queueSize: 1
                }
            );
            let bar;
            upload.on('httpUploadProgress', (evt) => {
                if (bar === undefined && evt.total) {
                    const total = evt.total / 1000000;
                    bar = new ProgressBar('uploading [:bar] :percent :etas', {
                        complete: '=',
                        incomplete: ' ',
                        width: 20,
                        total,
                        clear: true
                    });
                }
                // Guard: evt.total may be absent on early events, in which
                // case the bar hasn't been created yet.
                if (bar !== undefined) {
                    const curr = evt.loaded / 1000000;
                    bar.tick(curr - bar.curr);
                }
            });
            upload.promise()
                .then(resolve)
                .catch(reject);
        })));
}

/**
 * Sanitize the bucketName following the rules found here:
 * http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html
 * Lowercases each dot-separated label, drops empty labels, and replaces
 * every underscore with a hyphen.
 *
 * @param bucketName the candidate bucket name.
 * @returns {string} the sanitized bucket name.
 */
function sanitizeBucketName(bucketName) {
    return bucketName
        .split('.')
        .filter(label => label)
        // replace ALL underscores; the previous lodash `_.replace(e, '_', '-')`
        // only replaced the first occurrence per label.
        .map(label => label.toLowerCase().replace(/_/g, '-'))
        .join('.');
}
Loading

0 comments on commit 1931559

Please sign in to comment.