diff --git a/README.md b/README.md index 947c797..3fb7008 100644 --- a/README.md +++ b/README.md @@ -49,17 +49,22 @@ public-read-write`, `authenticated-read`, `bucket-owner-read`, `bucket-owner-ful * **gzip** - (*boolean*) If true, uploads will be gzip-encoded. * **gzipExclude** - (*array*) Define extensions of files you don't want to run gzip on, an array of strings ie: `['.jpg', '.jpeg', '.png']`. * **upload** - (*array*) An array of objects, each object representing a file upload and containing a `src` -and a `dest`. Any of the above values may also be overriden. Passing `rel:DIR` will cause the filesnames to be -expanded so that wild cards are not passed to the source name. +and a `dest`. Any of the above values may also be overridden. + + Passing `rel:DIR` will: + - Cause the filenames to be expanded relative to some relative or absolute path on the filesystem (`DIR`). This operation is exclusive of `DIR`, i.e., `DIR` itself will not be included in the expansion. + - Cause wildcards in 'src' to be replaced with actual paths and/or filenames. + * **download** - (*array*) An array of objects, each object representing a file download and containing a `src` and a `dest`. Any of the above values may also be overriden. * **del** - (*array*) An array of objects, each object containing a `src` to delete from s3. Any of the above values may also be overriden. -* **sync** - (*array*) An array of ojects, each oject containing a `src` and `dest`. Default behavior is to -only upload new files (that don't exist). Adding `verify:true` forces an MD5 hash and Modified time check prior -to overwriting the server files. +* **sync** - (*array*) An array of objects, each object containing a `src` and `dest`. Default behavior is to only upload new files (that don't exist). Set a key called `verify` with the value `true` on this object's options property (i.e. 
`options: {verify: true}`) to upload existing files if and only if they are newer than the versions of those same files on the server. This is implemented via an MD5 hash and by checking the modified times of the files. * **trackChanges** - (*boolean*) Default `false`. If true, an array of changed assets can be retrieved from `grunt.config('s3.changed')` * **debug** - (*boolean*) If true, no transfers with S3 will occur, will print all actions for review by user +* **logSuccess** - (*boolean*) If false, output for successful transfers will be ignored. Default: true +* **logErrors** - (*boolean*) If false, output for failed transfers will be ignored. Default: true + ### Example @@ -101,8 +106,10 @@ grunt.initConfig({ dest: 'documents/ignore.txt', // These values will override the above settings. - bucket: 'some-specific-bucket', - access: 'authenticated-read' + options: { + bucket: 'some-specific-bucket', + access: 'authenticated-read' + } }, { // Wildcards are valid *for uploads only* until I figure out a good implementation @@ -144,7 +151,7 @@ grunt.initConfig({ }, { // make sure this document is newer than the one on S3 and replace it - verify: true, + options: { verify: true }, src: 'passwords.txt', dest: 'documents/ignore.txt' }, diff --git a/package.json b/package.json index 0619d64..3eec428 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "grunt-s3", "description": "A grunt task to automate moving files to/from Amazon S3.", - "version": "0.2.0-alpha.2", + "version": "0.2.0-alpha.3", "author": "Aaron Forsander (https://github.com/pifantastic)", "homepage": "https://github.com/pifantastic/grunt-s3", "repository": { diff --git a/tasks/lib/S3Task.js b/tasks/lib/S3Task.js index a209a7b..aab9338 100644 --- a/tasks/lib/S3Task.js +++ b/tasks/lib/S3Task.js @@ -61,12 +61,16 @@ S3Task.prototype = { var transfer = transferFn(); transfer.done(function (msg) { - grunt.log.ok(msg); + if (config.logSuccess) { + grunt.log.ok(msg); + } completed(); }); 
transfer.fail(function (msg) { - grunt.log.error(msg); + if (config.logErrors) { + grunt.log.error(msg); + } ++errors; completed(); }); @@ -137,7 +141,9 @@ S3Task.prototype = { debug: false, verify: false, maxOperations: 0, - encodePaths: false + encodePaths: false, + logSuccess: true, + logErrors: true }; // Grab the actions to perform from the task data, default to empty arrays diff --git a/tasks/lib/s3.js b/tasks/lib/s3.js index 92feaea..78fb4a8 100644 --- a/tasks/lib/s3.js +++ b/tasks/lib/s3.js @@ -28,7 +28,7 @@ var existsSync = ('existsSync' in fs) ? fs.existsSync : path.existsSync; /** * Success/error messages. */ -var MSG_UPLOAD_SUCCESS = '↗'.blue + ' Uploaded: %s (%s)'; +var MSG_UPLOAD_SUCCESS = '↗'.blue + ' Uploaded: %s to %s:%s (%s)'; var MSG_DOWNLOAD_SUCCESS = '↙'.yellow + ' Downloaded: %s (%s)'; var MSG_DELETE_SUCCESS = '✗'.red + ' Deleted: %s'; var MSG_COPY_SUCCESS = '→'.cyan + ' Copied: %s to %s'; @@ -117,13 +117,14 @@ exports.init = function (grunt) { exports.put = exports.upload = function (src, dest, opts) { var dfd = new _.Deferred(); var options = makeOptions(opts); + var prettySrc = path.relative(process.cwd(), src); // Make sure the local file exists. 
if (!existsSync(src)) { - return dfd.reject(makeError(MSG_ERR_NOT_FOUND, src)); + return dfd.reject(makeError(MSG_ERR_NOT_FOUND, prettySrc)); } - var headers = options.headers || {}; + var headers = _.clone(options.headers || {}); if (options.access) { headers['x-amz-acl'] = options.access; @@ -133,7 +134,7 @@ exports.init = function (grunt) { var client = makeClient(options); if (options.debug) { - return dfd.resolve(util.format(MSG_UPLOAD_DEBUG, path.relative(process.cwd(), src), client.bucket, dest)).promise(); + return dfd.resolve(util.format(MSG_UPLOAD_DEBUG, prettySrc, client.bucket, dest)).promise(); } // Encapsulate this logic to make it easier to gzip the file first if @@ -146,13 +147,13 @@ exports.init = function (grunt) { // If there was an upload error or any status other than a 200, we // can assume something went wrong. if (err || res.statusCode !== 200) { - cb(makeError(MSG_ERR_UPLOAD, src, err || res.statusCode)); + cb(makeError(MSG_ERR_UPLOAD, prettySrc, err || res.statusCode)); } else { // Read the local file so we can get its md5 hash. fs.readFile(src, function (err, data) { if (err) { - cb(makeError(MSG_ERR_UPLOAD, src, err)); + cb(makeError(MSG_ERR_UPLOAD, prettySrc, err)); } else { // The etag head in the response from s3 has double quotes around @@ -163,14 +164,14 @@ exports.init = function (grunt) { var localHash = crypto.createHash('md5').update(data).digest('hex'); if (remoteHash === localHash) { - var msg = util.format(MSG_UPLOAD_SUCCESS, src, localHash); + var msg = util.format(MSG_UPLOAD_SUCCESS, prettySrc, client.bucket, dest, localHash); if (options.trackChanges) { trackChanges(dest); } cb(null, msg); } else { - cb(makeError(MSG_ERR_CHECKSUM, 'Upload', localHash, remoteHash, src)); + cb(makeError(MSG_ERR_CHECKSUM, 'Upload', localHash, remoteHash, prettySrc)); } } }); @@ -203,11 +204,12 @@ exports.init = function (grunt) { // Gzip the file and upload when done. 
input.pipe(zlib.createGzip()).pipe(output) .on('error', function (err) { - dfd.reject(makeError(MSG_ERR_UPLOAD, src, err)); + dfd.reject(makeError(MSG_ERR_UPLOAD, prettySrc, err)); }) .on('close', function () { // Update the src to point to the newly created .gz file. src = tmp.path; + prettySrc += ' (gzip)'; upload(function (err, msg) { // Clean up the temp file. tmp.unlinkSync(); @@ -406,12 +408,13 @@ exports.init = function (grunt) { exports.sync = function (src, dest, opts) { var dfd = new _.Deferred(); var options = makeOptions(opts); + var prettySrc = path.relative(process.cwd(), src); // Pick out the configuration options we need for the client. var client = makeClient(options); if (options.debug) { - return dfd.resolve(util.format(MSG_SKIP_DEBUG, client.bucket, src)).promise(); + return dfd.resolve(util.format(MSG_SKIP_DEBUG, client.bucket, prettySrc)).promise(); } // Check for the file on s3 @@ -424,16 +427,16 @@ exports.init = function (grunt) { upload = exports.upload( src, dest, opts); // pass through the dfd state return upload.then( dfd.resolve, dfd.reject ); - } - + } + if (!res || err || res.statusCode !== 200 ) { - return dfd.reject(makeError(MSG_ERR_DOWNLOAD, src, err || res.statusCode)); - } + return dfd.reject(makeError(MSG_ERR_DOWNLOAD, prettySrc, err || res.statusCode)); + } // we do not wish to overwrite a file that exists by verifying we have a newer one in place if( !options.verify ) { // the file exists so do nothing with that - return dfd.resolve(util.format(MSG_SKIP_SUCCESS, src)); + return dfd.resolve(util.format(MSG_SKIP_SUCCESS, prettySrc)); } // the file exists so let's check to make sure it's the right file, if not, we'll update it @@ -442,7 +445,7 @@ exports.init = function (grunt) { var remoteHash, localHash; if (err) { - return dfd.reject(makeError(MSG_ERR_UPLOAD, src, err)); + return dfd.reject(makeError(MSG_ERR_UPLOAD, prettySrc, err)); } // The etag head in the response from s3 has double quotes around // it. Strip them out. 
@@ -453,15 +456,15 @@ exports.init = function (grunt) { if (remoteHash === localHash) { // the file exists and is the same so do nothing with that - return dfd.resolve(util.format(MSG_SKIP_MATCHES, src)); + return dfd.resolve(util.format(MSG_SKIP_MATCHES, prettySrc)); } fs.stat( src, function(err, stats) { var remoteWhen, localWhen, upload; if (err) { - return dfd.reject(makeError(MSG_ERR_UPLOAD, src, err)); - } + return dfd.reject(makeError(MSG_ERR_UPLOAD, prettySrc, err)); + } // which one is newer? if local is newer, we should upload it remoteWhen = new Date(res.headers['last-modified'] || "0"); // earliest date possible if no header is returned @@ -469,7 +472,7 @@ exports.init = function (grunt) { if ( localWhen <= remoteWhen ) { // Remote file was older - return dfd.resolve(util.format(MSG_SKIP_OLDER, src)); + return dfd.resolve(util.format(MSG_SKIP_OLDER, prettySrc)); } // default is that local is newer, only upload when it is diff --git a/test/s3Task.js b/test/s3Task.js index 7646bb2..44839bf 100644 --- a/test/s3Task.js +++ b/test/s3Task.js @@ -132,6 +132,8 @@ module.exports = { test.equal(config.key, s3Config.options.key, 'Key'); test.equal(config.secret, s3Config.options.secret, 'Secret'); test.equal(config.debug, false, 'Debug'); + test.equal(config.logSuccess, true, 'Log success'); + test.equal(config.logErrors, true, 'Log errors'); // Test the data actions test.equal(config.upload.length, 1, 'Upload length');