Commit

Merge branch 'master' of https://github.com/pifantastic/grunt-s3 into record-changes

* 'master' of https://github.com/pifantastic/grunt-s3:
  Log bucket and destination on upload
  Headers were broken when uploading a lot of large files (with maxOperations set to 5). Could also be relevant to issue pifantastic#100.
  Mention that verify is an option and be more specific as to where it can be set
  Add documentation for the 'rel' option.
  Fixed the options in some of the documentation examples
  Update readme
  Reduce verbosity
  v0.2.0-alpha.3
  update sync messages too
  oops, missed a src
  don't bring tmp gzip path

Conflicts:
	README.md
	tasks/lib/s3.js
coen-hyde committed Jan 26, 2014
2 parents cfbc76c + 33d3900 commit 2c712e0
Showing 5 changed files with 50 additions and 32 deletions.
23 changes: 15 additions & 8 deletions README.md
@@ -49,17 +49,22 @@ public-read-write`, `authenticated-read`, `bucket-owner-read`, `bucket-owner-ful
* **gzip** - (*boolean*) If true, uploads will be gzip-encoded.
* **gzipExclude** - (*array*) Define extensions of files you don't want to run gzip on, as an array of strings, e.g. `['.jpg', '.jpeg', '.png']`.
* **upload** - (*array*) An array of objects, each object representing a file upload and containing a `src`
and a `dest`. Any of the above values may also be overridden. Passing `rel:DIR` will cause the filenames to be
expanded so that wildcards are not passed to the source name.
and a `dest`. Any of the above values may also be overridden.

Passing `rel:DIR` will:
- Cause filenames to be expanded relative to `DIR`, a relative or absolute path on the filesystem. The expansion excludes `DIR` itself, i.e., `DIR` will not appear in the expanded names.
- Cause wildcards in `src` to be replaced with actual paths and/or filenames (see the sketch after this options list).

* **download** - (*array*) An array of objects, each object representing a file download and containing a
`src` and a `dest`. Any of the above values may also be overridden.
* **del** - (*array*) An array of objects, each object containing a `src` to delete from s3. Any of
the above values may also be overridden.
* **sync** - (*array*) An array of objects, each object containing a `src` and `dest`. Default behavior is to
only upload new files (that don't exist). Adding `verify:true` forces an MD5 hash and modified-time check prior
to overwriting the server files.
* **sync** - (*array*) An array of objects, each object containing a `src` and `dest`. Default behavior is to only upload new files (ones that don't already exist on the server). Set a key called `verify` with the value `true` on this object's options property (i.e. `options: {verify: true}`) to upload existing files if and only if they are newer than the versions on the server. This is implemented via an MD5 hash and a check of the files' modified times.
* **trackChanges** - (*boolean*) Default `false`. If true, an array of changed assets can be retrieved from `grunt.config('s3.changed')`.
* **debug** - (*boolean*) If true, no transfers with S3 will occur; all actions will be printed for the user to review.
* **logSuccess** - (*boolean*) If false, output for successful transfers will be suppressed. Default: true
* **logErrors** - (*boolean*) If false, output for failed transfers will be suppressed. Default: true
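
For illustration, here is a minimal sketch combining several of the options above. It is a hypothetical config, not from this commit: the credentials, bucket, and paths are placeholders, and the exact way `rel` composes with `dest` is an assumption rather than something the docs above pin down.

```js
grunt.initConfig({
  s3: {
    options: {
      key: 'YOUR_KEY',          // hypothetical placeholder
      secret: 'YOUR_SECRET',    // hypothetical placeholder
      bucket: 'example-bucket', // hypothetical bucket name
      trackChanges: true        // record uploads for grunt.config('s3.changed')
    },
    dev: {
      // rel: 'built' expands wildcards relative to built/, excluding
      // built/ itself from the expanded names (assumed behavior).
      upload: [
        { src: 'built/**/*.js', dest: 'scripts/', rel: 'built' }
      ],
      // verify: true overwrites an existing remote file only after the
      // MD5 and modified-time checks described above.
      sync: [
        { options: { verify: true }, src: 'docs/**/*', dest: 'docs/' }
      ]
    }
  }
});
```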


### Example

@@ -101,8 +106,10 @@ grunt.initConfig({
dest: 'documents/ignore.txt',

// These values will override the above settings.
bucket: 'some-specific-bucket',
access: 'authenticated-read'
options: {
bucket: 'some-specific-bucket',
access: 'authenticated-read'
}
},
{
// Wildcards are valid *for uploads only* until I figure out a good implementation
@@ -144,7 +151,7 @@ grunt.initConfig({
},
{
// make sure this document is newer than the one on S3 and replace it
verify: true,
options: { verify: true },
src: 'passwords.txt',
dest: 'documents/ignore.txt'
},
2 changes: 1 addition & 1 deletion package.json
@@ -1,7 +1,7 @@
{
"name": "grunt-s3",
"description": "A grunt task to automate moving files to/from Amazon S3.",
"version": "0.2.0-alpha.2",
"version": "0.2.0-alpha.3",
"author": "Aaron Forsander (https://github.com/pifantastic)",
"homepage": "https://github.com/pifantastic/grunt-s3",
"repository": {
12 changes: 9 additions & 3 deletions tasks/lib/S3Task.js
@@ -61,12 +61,16 @@ S3Task.prototype = {
var transfer = transferFn();

transfer.done(function (msg) {
grunt.log.ok(msg);
if (config.logSuccess) {
grunt.log.ok(msg);
}
completed();
});

transfer.fail(function (msg) {
grunt.log.error(msg);
if (config.logErrors) {
grunt.log.error(msg);
}
++errors;
completed();
});
@@ -137,7 +141,9 @@ S3Task.prototype = {
debug: false,
verify: false,
maxOperations: 0,
encodePaths: false
encodePaths: false,
logSuccess: true,
logErrors: true
};

// Grab the actions to perform from the task data, default to empty arrays
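
A hedged usage sketch for the two new flags above (target and paths are hypothetical); per the defaults added in this hunk, both flags are `true` unless overridden:

```js
grunt.initConfig({
  s3: {
    options: {
      logSuccess: false, // suppress the per-file success lines
      logErrors: true    // keep error output (already the default)
    },
    release: {
      upload: [
        { src: 'dist/**/*', dest: 'release/' } // hypothetical paths
      ]
    }
  }
});
```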
43 changes: 23 additions & 20 deletions tasks/lib/s3.js
@@ -28,7 +28,7 @@ var existsSync = ('existsSync' in fs) ? fs.existsSync : path.existsSync;
/**
* Success/error messages.
*/
var MSG_UPLOAD_SUCCESS = '↗'.blue + ' Uploaded: %s (%s)';
var MSG_UPLOAD_SUCCESS = '↗'.blue + ' Uploaded: %s to %s:%s (%s)';
var MSG_DOWNLOAD_SUCCESS = '↙'.yellow + ' Downloaded: %s (%s)';
var MSG_DELETE_SUCCESS = '✗'.red + ' Deleted: %s';
var MSG_COPY_SUCCESS = '→'.cyan + ' Copied: %s to %s';
@@ -117,13 +117,14 @@ exports.init = function (grunt) {
exports.put = exports.upload = function (src, dest, opts) {
var dfd = new _.Deferred();
var options = makeOptions(opts);
var prettySrc = path.relative(process.cwd(), src);

// Make sure the local file exists.
if (!existsSync(src)) {
return dfd.reject(makeError(MSG_ERR_NOT_FOUND, src));
return dfd.reject(makeError(MSG_ERR_NOT_FOUND, prettySrc));
}

var headers = options.headers || {};
var headers = _.clone(options.headers || {});

if (options.access) {
headers['x-amz-acl'] = options.access;
@@ -133,7 +134,7 @@ exports.init = function (grunt) {
var client = makeClient(options);

if (options.debug) {
return dfd.resolve(util.format(MSG_UPLOAD_DEBUG, path.relative(process.cwd(), src), client.bucket, dest)).promise();
return dfd.resolve(util.format(MSG_UPLOAD_DEBUG, prettySrc, client.bucket, dest)).promise();
}

// Encapsulate this logic to make it easier to gzip the file first if
@@ -146,13 +147,13 @@ exports.init = function (grunt) {
// If there was an upload error or any status other than a 200, we
// can assume something went wrong.
if (err || res.statusCode !== 200) {
cb(makeError(MSG_ERR_UPLOAD, src, err || res.statusCode));
cb(makeError(MSG_ERR_UPLOAD, prettySrc, err || res.statusCode));
}
else {
// Read the local file so we can get its md5 hash.
fs.readFile(src, function (err, data) {
if (err) {
cb(makeError(MSG_ERR_UPLOAD, src, err));
cb(makeError(MSG_ERR_UPLOAD, prettySrc, err));
}
else {
// The etag head in the response from s3 has double quotes around
Expand All @@ -163,14 +164,14 @@ exports.init = function (grunt) {
var localHash = crypto.createHash('md5').update(data).digest('hex');

if (remoteHash === localHash) {
var msg = util.format(MSG_UPLOAD_SUCCESS, src, localHash);
var msg = util.format(MSG_UPLOAD_SUCCESS, prettySrc, client.bucket, dest, localHash);
if (options.trackChanges) {
trackChanges(dest);
}
cb(null, msg);
}
else {
cb(makeError(MSG_ERR_CHECKSUM, 'Upload', localHash, remoteHash, src));
cb(makeError(MSG_ERR_CHECKSUM, 'Upload', localHash, remoteHash, prettySrc));
}
}
});
@@ -203,11 +204,12 @@ exports.init = function (grunt) {
// Gzip the file and upload when done.
input.pipe(zlib.createGzip()).pipe(output)
.on('error', function (err) {
dfd.reject(makeError(MSG_ERR_UPLOAD, src, err));
dfd.reject(makeError(MSG_ERR_UPLOAD, prettySrc, err));
})
.on('close', function () {
// Update the src to point to the newly created .gz file.
src = tmp.path;
prettySrc += ' (gzip)';
upload(function (err, msg) {
// Clean up the temp file.
tmp.unlinkSync();
@@ -406,12 +408,13 @@ exports.init = function (grunt) {
exports.sync = function (src, dest, opts) {
var dfd = new _.Deferred();
var options = makeOptions(opts);
var prettySrc = path.relative(process.cwd(), src);

// Pick out the configuration options we need for the client.
var client = makeClient(options);

if (options.debug) {
return dfd.resolve(util.format(MSG_SKIP_DEBUG, client.bucket, src)).promise();
return dfd.resolve(util.format(MSG_SKIP_DEBUG, client.bucket, prettySrc)).promise();
}

// Check for the file on s3
@@ -424,16 +427,16 @@ exports.init = function (grunt) {
upload = exports.upload( src, dest, opts);
// pass through the dfd state
return upload.then( dfd.resolve, dfd.reject );
}
}

if (!res || err || res.statusCode !== 200 ) {
return dfd.reject(makeError(MSG_ERR_DOWNLOAD, src, err || res.statusCode));
}
return dfd.reject(makeError(MSG_ERR_DOWNLOAD, prettySrc, err || res.statusCode));
}

// by default do not overwrite a file that already exists; verify checks whether we have a newer one first
if( !options.verify ) {
// the file exists so do nothing with that
return dfd.resolve(util.format(MSG_SKIP_SUCCESS, src));
return dfd.resolve(util.format(MSG_SKIP_SUCCESS, prettySrc));
}

// the file exists so let's check to make sure it's the right file, if not, we'll update it
@@ -442,7 +445,7 @@ exports.init = function (grunt) {
var remoteHash, localHash;

if (err) {
return dfd.reject(makeError(MSG_ERR_UPLOAD, src, err));
return dfd.reject(makeError(MSG_ERR_UPLOAD, prettySrc, err));
}
// The etag head in the response from s3 has double quotes around
// it. Strip them out.
@@ -453,23 +456,23 @@ exports.init = function (grunt) {

if (remoteHash === localHash) {
// the file exists and is the same so do nothing with that
return dfd.resolve(util.format(MSG_SKIP_MATCHES, src));
return dfd.resolve(util.format(MSG_SKIP_MATCHES, prettySrc));
}

fs.stat( src, function(err, stats) {
var remoteWhen, localWhen, upload;

if (err) {
return dfd.reject(makeError(MSG_ERR_UPLOAD, src, err));
}
return dfd.reject(makeError(MSG_ERR_UPLOAD, prettySrc, err));
}

// which one is newer? if local is newer, we should upload it
remoteWhen = new Date(res.headers['last-modified'] || "0"); // earliest date possible if no header is returned
localWhen = new Date(stats.mtime || "1"); // make second earliest date possible if mtime isn't set

if ( localWhen <= remoteWhen ) {
// Remote file was older
return dfd.resolve(util.format(MSG_SKIP_OLDER, src));
return dfd.resolve(util.format(MSG_SKIP_OLDER, prettySrc));
}

// default is that local is newer, only upload when it is
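
An aside on the `_.clone(options.headers || {})` change above: the merge message notes that headers broke when uploading many large files with `maxOperations` set to 5. A plausible reading (an assumption; the diff itself does not say) is that all concurrent uploads shared a single headers object, so per-file mutations such as `x-amz-acl` leaked between in-flight transfers. A minimal sketch of that hazard follows; it illustrates the pattern only and is not grunt-s3 internals:

```js
// Sketch only: why mutating a shared headers object across concurrent
// operations is unsafe, and how a per-call clone avoids it.
var defaults = { 'Content-Type': 'text/plain' };

function buildHeadersShared(acl) {
  var headers = defaults;                    // every call mutates one object
  if (acl) { headers['x-amz-acl'] = acl; }
  return headers;
}

function buildHeadersCloned(acl) {
  var headers = Object.assign({}, defaults); // per-call copy, like _.clone
  if (acl) { headers['x-amz-acl'] = acl; }
  return headers;
}

buildHeadersShared('public-read');
console.log(buildHeadersShared(null)); // still has x-amz-acl: leaked state
console.log(buildHeadersCloned(null)); // clean copy, no leak
```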
2 changes: 2 additions & 0 deletions test/s3Task.js
@@ -132,6 +132,8 @@ module.exports = {
test.equal(config.key, s3Config.options.key, 'Key');
test.equal(config.secret, s3Config.options.secret, 'Secret');
test.equal(config.debug, false, 'Debug');
test.equal(config.logSuccess, true, 'Log success');
test.equal(config.logErrors, true, 'Log errors');

// Test the data actions
test.equal(config.upload.length, 1, 'Upload length');
