Skip to content

Commit

Permalink
Merge pull request #186 from hasonmsft/master
Browse files Browse the repository at this point in the history
Storage Client Library 1.3.0
  • Loading branch information
vinjiang authored Aug 29, 2016
2 parents 0001158 + 2f58ea0 commit b12cd98
Show file tree
Hide file tree
Showing 24 changed files with 10,492 additions and 8,470 deletions.
4 changes: 4 additions & 0 deletions BreakingChanges.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,7 @@
Tracking Breaking Changes in 1.3.0
QUEUE
* Updated the `QueueMessageResult.dequeueCount` from `string` to `number`.

Tracking Breaking Changes in 1.2.0
TABLE
* Beginning with version 2015-12-11, the Atom feed is no longer supported as a payload format for Table service operations. Version 2015-12-11 and later versions support only JSON for the payload format.
Expand Down
19 changes: 19 additions & 0 deletions ChangeLog.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,25 @@
Note: This is an Azure Storage only package. The all up Azure node sdk still has the old storage bits in there. In a future release, those storage bits will be removed and an npm dependency to this storage node sdk will
be taken. This is a GA release and the changes described below indicate the changes from the Azure node SDK 0.9.8 available here - https://github.com/Azure/azure-sdk-for-node.

2016.08 Version 1.3.0

ALL

* Fixed the issue that retry filter will fail against storage emulator.
* Fixed a hang issue in `StorageServiceClient` when a retry policy filter is set: on retrying the request, the stream was no longer readable.
* Updated the default value of `CorsRule.ExposedHeaders`, `CorsRule.AllowedHeaders` to empty and `CorsRule.MaxAgeInSeconds` to `0` for `setServiceProperties` APIs of all services.
* Fixed the issue that service SAS doesn't work if specifying the `AccessPolicy.Protocols`.

BLOB
* Added the API `BlobService.getPageRangesDiff` for getting the page ranges difference. Refer to https://msdn.microsoft.com/en-us/library/azure/mt736912.aspx for more detailed information.

QUEUE
* Updated the `QueueMessageResult.dequeueCount` from `string` to `number`.
* Added the API `QueueService.getUrl` for getting the queue url.

TABLE
* Added the API `TableService.getUrl` for getting the table url.

2016.07 Version 1.2.0

ALL
Expand Down
9 changes: 9 additions & 0 deletions lib/azure-storage.js
Original file line number Diff line number Diff line change
Expand Up @@ -344,6 +344,15 @@ exports.generateAccountSharedAccessSignature = function(storageAccountOrConnecti
* @property {number} end The end of the range.
*/

/**
* The range diff. Refer to https://msdn.microsoft.com/en-us/library/azure/mt736912.aspx
* @typedef {object} RangeDiff
* @property {number} start The start of the range.
* @property {number} end The end of the range.
* @property {boolean} isCleared If the range is cleared or not.
*/

exports.Constants = azureCommon.Constants;
exports.StorageUtilities = azureCommon.StorageUtilities;
exports.AccessCondition = azureCommon.AccessCondition;
Expand Down
8 changes: 5 additions & 3 deletions lib/common/filters/retrypolicyfilter.js
Original file line number Diff line number Diff line change
Expand Up @@ -126,9 +126,11 @@ RetryPolicyFilter._handle = function (self, requestOptions, next) {

// Only in the case of success from server but client side failure like MD5 or length mismatch, returnObject.retryable has a value(we explicitly set it to false).
// In this case, we should not retry the request.
if (returnObject.error && azureutil.objectIsNull(returnObject.retryable) &&
// If the output stream was already sent to the server and an error came back,
// we should NOT retry within the SDK, as the stream data is no longer valid if we retry directly.
if (!returnObject.outputStreamSent && returnObject.error && azureutil.objectIsNull(returnObject.retryable) &&
((!azureutil.objectIsNull(returnObject.response) && retryInfo.retryable) ||
(returnObject.error.code === 'ETIMEDOUT' || returnObject.error.code === 'ESOCKETTIMEDOUT' || returnObject.error.code === 'ECONNRESET'))) {
(returnObject.error.code === 'ETIMEDOUT' || returnObject.error.code === 'ESOCKETTIMEDOUT' || returnObject.error.code === 'ECONNRESET' || returnObject.error.code === 'EAI_AGAIN'))) {

if (retryRequestOptions.currentLocation === Constants.StorageLocation.PRIMARY) {
lastPrimaryAttempt = returnObject.operationEndTime;
Expand Down Expand Up @@ -185,7 +187,7 @@ RetryPolicyFilter._handle = function (self, requestOptions, next) {

RetryPolicyFilter._shouldRetryOnError = function (statusCode, requestOptions) {
var retryInfo = (requestOptions && requestOptions.retryContext) ? requestOptions.retryContext : {};

// Non-timeout Cases
if (statusCode >= 300 && statusCode != 408) {
// Always no retry on "not implemented" and "version not supported"
Expand Down
12 changes: 12 additions & 0 deletions lib/common/models/servicepropertiesresult.js
Original file line number Diff line number Diff line change
Expand Up @@ -156,18 +156,30 @@ function serializeCorsRules(doc, rules){
doc = doc.ele(ServicePropertiesConstants.ALLOWED_HEADERS_ELEMENT)
.txt(rule.AllowedHeaders.join(','))
.up();
} else {
doc = doc.ele(ServicePropertiesConstants.ALLOWED_HEADERS_ELEMENT)
.txt('')
.up();
}

if(typeof rule.ExposedHeaders !== 'undefined' && _.isArray(rule.ExposedHeaders)){
doc = doc.ele(ServicePropertiesConstants.EXPOSED_HEADERS_ELEMENT)
.txt(rule.ExposedHeaders.join(','))
.up();
} else {
doc = doc.ele(ServicePropertiesConstants.EXPOSED_HEADERS_ELEMENT)
.txt('')
.up();
}

if(typeof rule.MaxAgeInSeconds !== 'undefined'){
doc = doc.ele(ServicePropertiesConstants.MAX_AGE_IN_SECONDS_ELEMENT)
.txt(rule.MaxAgeInSeconds)
.up();
} else {
doc = doc.ele(ServicePropertiesConstants.MAX_AGE_IN_SECONDS_ELEMENT)
.txt('0')
.up();
}

doc = doc.up();
Expand Down
42 changes: 40 additions & 2 deletions lib/common/services/storageserviceclient.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
// Module dependencies.
var request = require('request');
var url = require('url');
var qs = require('querystring');
var util = require('util');
var xml2js = require('xml2js');
var events = require('events');
Expand Down Expand Up @@ -269,6 +270,17 @@ StorageServiceClient.prototype._performRequest = function (webResource, body, op
// Initialize the operationExpiryTime
this._setOperationExpiryTime(options);

// If the output stream was already sent to the server and an error came back,
// we should NOT retry within the SDK, as the stream data is no longer valid if we retry directly.
// And it's very hard for SDK to re-wind the stream.
//
// If users want to retry on this kind of error, they can implement their own logic to parse the response and
// determine if they need to re-prepare a stream and call our SDK API to retry.
//
// Currently for blobs/files with size greater than 32MB (DEFAULT_SINGLE_BLOB_PUT_THRESHOLD_IN_BYTES),
// we'll send the stream in chunked buffers, which doesn't have this issue.
var outputStreamSent = false;

var operation = function (options, next) {
self._validateLocation(options);
var currentLocation = options.currentLocation;
Expand Down Expand Up @@ -299,7 +311,8 @@ StorageServiceClient.prototype._performRequest = function (webResource, body, op
responseObject.operationEndTime = new Date();
// Required for listing operations to make sure successive operations go to the same location.
responseObject.targetLocation = currentLocation;

responseObject.outputStreamSent = outputStreamSent;

callback(responseObject, next);
};

Expand Down Expand Up @@ -450,6 +463,7 @@ StorageServiceClient.prototype._performRequest = function (webResource, body, op
var index = 0;

body.outputStream.on('data', function (d) {
outputStreamSent = true;
if(self._maximumExecutionTimeExceeded(Date.now(), options.operationExpiryTime)) {
processResponseCallback(new TimeoutError(SR.MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION));
} else {
Expand All @@ -470,6 +484,7 @@ StorageServiceClient.prototype._performRequest = function (webResource, body, op
// NOTE: workaround for an unexpected EPIPE exception when piping streams larger than 29 MB
if (!azureutil.objectIsNull(finalRequestOptions.headers['content-length']) && finalRequestOptions.headers['content-length'] < 29 * 1024 * 1024) {
body.outputStream.pipe(buildRequest());
outputStreamSent = true;

if (azureutil.isStreamPaused(body.outputStream)) {
body.outputStream.resume();
Expand Down Expand Up @@ -864,7 +879,9 @@ StorageServiceClient.getStorageSettings = function (storageAccountOrConnectionSt
*/
StorageServiceClient.prototype._setRequestUrl = function (webResource, options) {
// Normalize the path
webResource.path = this._getPath(webResource.path);
// Back up the original path of the webResource to make sure it works correctly even if this function gets executed multiple times - e.g. by RetryFilter
webResource.originalPath = webResource.originalPath || webResource.path;
webResource.path = this._getPath(webResource.originalPath);

if(!this.host){
throw new ArgumentNullError('this.host', SR.STORAGE_HOST_LOCATION_REQUIRED);
Expand Down Expand Up @@ -903,6 +920,27 @@ StorageServiceClient.prototype._getPath = function (path) {
return path;
};

/**
* Get the url of a given path
*/
/**
 * Builds the absolute URL for a given resource path.
 *
 * Picks the primary or secondary host, normalizes it (strips any port,
 * guarantees a trailing slash), appends the normalized path, and attaches
 * the SAS token (if any) as the query string.
 *
 * @param {string}  path     The resource path.
 * @param {string}  sasToken A shared access signature token (query-string form); may be undefined.
 * @param {boolean} primary  When explicitly false, the secondary host is used; otherwise the primary host.
 * @return {string} The formatted URL string.
 */
StorageServiceClient.prototype._getUrl = function (path, sasToken, primary) {
  // Only an explicit `false` selects the secondary endpoint.
  var endpoint = (!azureutil.objectIsNull(primary) && primary === false)
    ? this.host.secondaryHost
    : this.host.primaryHost;

  endpoint = azureutil.trimPortFromUri(endpoint);
  if (endpoint && endpoint.lastIndexOf('/') !== (endpoint.length - 1)) {
    endpoint = endpoint + '/';
  }

  // Parse the SAS token into a query object so url.format encodes it properly.
  var queryObject = qs.parse(sasToken);
  var resourcePath = url.format({ pathname: this._getPath(path), query: queryObject });
  return url.resolve(endpoint, resourcePath);
};

/**
* Initializes the default filter.
* This filter is responsible for chaining the pre filters request into the operation and, after processing the response,
Expand Down
2 changes: 1 addition & 1 deletion lib/common/signing/sharedkey.js
Original file line number Diff line number Diff line change
Expand Up @@ -467,8 +467,8 @@ SharedKey.prototype._generateSignature = function (serviceType, path, sharedAcce
getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Expiry : '') +
getvalueToAppend(canonicalizedResource) +
getvalueToAppend(sharedAccessPolicy.Id) +
getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Protocols : '') +
getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.IPAddressOrRange : '') +
getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Protocols : '') +
sasVersion;

if(sasVersion == CompatibleVersionConstants.FEBRUARY_2012) {
Expand Down
10 changes: 9 additions & 1 deletion lib/common/util/constants.js
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ var Constants = {
/*
* Specifies the value to use for UserAgent header.
*/
USER_AGENT_PRODUCT_VERSION: '1.2.0',
USER_AGENT_PRODUCT_VERSION: '1.3.0',

/**
* The number of default concurrent requests for parallel operation.
Expand Down Expand Up @@ -1730,6 +1730,14 @@ var Constants = {
*/
SNAPSHOT: 'snapshot',

/**
* The previous snapshot value.
*
* @const
* @type {string}
*/
PREV_SNAPSHOT: 'prevsnapshot',

/**
* The timeout value.
*
Expand Down
130 changes: 125 additions & 5 deletions lib/services/blob/blobservice.js
Original file line number Diff line number Diff line change
Expand Up @@ -2241,9 +2241,16 @@ BlobService.prototype.generateSharedAccessSignature = function (container, blob,
* @example
* var azure = require('azure-storage');
* var blobService = azure.createBlobService();
* //create a SAS that expires in an hour
* var sasToken = blobService.generateSharedAccessSignature(containerName, blobName, { AccessPolicy: { Expiry: azure.date.minutesFromNow(60); } });
* var sasUrl = blobService.getUrl(containerName, blobName, sasToken, true);
* var sharedAccessPolicy = {
* AccessPolicy: {
* Permissions: azure.BlobUtilities.SharedAccessPermissions.READ,
* Start: startDate,
* Expiry: expiryDate
* },
* };
*
* var sasToken = blobService.generateSharedAccessSignature(containerName, blobName, sharedAccessPolicy);
* var sasUrl = blobService.getUrl(containerName, blobName, sasToken);
*/
BlobService.prototype.getUrl = function (container, blob, sasToken, primary, snapshotId) {
validate.validateArgs('getUrl', function (v) {
Expand All @@ -2254,8 +2261,7 @@ BlobService.prototype.getUrl = function (container, blob, sasToken, primary, sna
var host;
if (!azureutil.objectIsNull(primary) && primary === false) {
host = this.host.secondaryHost;
}
else {
} else {
host = this.host.primaryHost;
}

Expand Down Expand Up @@ -2680,6 +2686,120 @@ BlobService.prototype.listPageRanges = function (container, blob, optionsOrCallb
this.performRequest(webResource, null, options, processResponseCallback);
};

/**
* Gets page ranges that have been updated or cleared since the snapshot specified by `previousSnapshotTime` was taken. Gets all of the page ranges by default, or only the page ranges over a specific range of bytes if rangeStart and rangeEnd are specified.
*
* @this {BlobService}
* @param {string}             container                                   The container name.
* @param {string}             blob                                        The blob name.
* @param {string}             previousSnapshotTime                        The previous snapshot time for comparison. Must be prior to `options.snapshotId` if it's provided.
* @param {object}             [options]                                   The request options.
* @param {AccessConditions}   [options.accessConditions]                  The access conditions.
* @param {int}                [options.rangeStart]                        The range start. Must be 512-byte aligned.
* @param {int}                [options.rangeEnd]                          The range end. Must end on a 512-byte boundary (i.e. rangeEnd + 1 divisible by 512).
* @param {string}             [options.snapshotId]                        The snapshot identifier.
* @param {string}             [options.leaseId]                           The target blob lease identifier.
* @param {LocationMode}       [options.locationMode]                      Specifies the location mode used to decide which location the request should be sent to.
*                                                                         Please see StorageUtilities.LocationMode for the possible values.
* @param {int}                [options.timeoutIntervalInMs]               The server timeout interval, in milliseconds, to use for the request.
* @param {int}                [options.maximumExecutionTimeInMs]          The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
*                                                                         The maximum execution time interval begins at the time that the client begins building the request. The maximum
*                                                                         execution time is checked intermittently while performing requests, and before executing retries.
* @param {string}             [options.clientRequestId]                   A string that represents the client request ID with a 1KB character limit.
* @param {bool}               [options.useNagleAlgorithm]                 Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
*                                                                         The default value is false.
* @param {errorOrResult}      callback                                    `error` will contain information
*                                                                         if an error occurs; otherwise `result` will contain
*                                                                         the page ranges diff information, see `[RangeDiff]{@link RangeDiff}` for detailed information.
*                                                                         `response` will contain information related to this operation.
*/
BlobService.prototype.getPageRangesDiff = function (container, blob, previousSnapshotTime, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('getPageRangesDiff', function (v) {
    v.string(container, 'container');
    v.string(blob, 'blob');
    v.containerNameIsValid(container);
    v.callback(callback);
  });

  var options = extend(true, {}, userOptions);

  var resourceName = createResourceName(container, blob);
  var webResource = WebResource.get(resourceName)
    .withQueryOption(QueryStringConstants.COMP, 'pagelist')
    .withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId)
    .withQueryOption(QueryStringConstants.PREV_SNAPSHOT, previousSnapshotTime);

  // Page ranges must be aligned to 512-byte page boundaries.
  if (options.rangeStart && options.rangeStart % BlobConstants.PAGE_SIZE !== 0) {
    throw new RangeError(SR.INVALID_PAGE_START_OFFSET);
  }

  // The end offset is inclusive, hence the + 1 for the alignment check.
  // (The original code performed this exact check twice; the duplicate was removed.)
  if (options.rangeEnd && (options.rangeEnd + 1) % BlobConstants.PAGE_SIZE !== 0) {
    throw new RangeError(SR.INVALID_PAGE_END_OFFSET);
  }

  BlobResult.setHeadersFromBlob(webResource, options);

  options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY;

  // Normalizes a PageList entry (a single object or an array of objects with
  // string `Start`/`End` fields) into { start, end, isCleared } records and
  // appends them to `resultList`.
  var appendRanges = function (ranges, isCleared, resultList) {
    if (!ranges) {
      return;
    }

    if (!_.isArray(ranges)) {
      ranges = [ ranges ];
    }

    ranges.forEach(function (pageRange) {
      resultList.push({
        start: parseInt(pageRange.Start, 10),
        end: parseInt(pageRange.End, 10),
        isCleared: isCleared
      });
    });
  };

  var processResponseCallback = function (responseObject, next) {
    responseObject.pageRangesDiff = null;
    if (!responseObject.error) {
      responseObject.pageRangesDiff = [];

      // Updated ranges are reported under PageRange, cleared ranges under ClearRange.
      appendRanges(responseObject.response.body.PageList.PageRange, false, responseObject.pageRangesDiff);
      appendRanges(responseObject.response.body.PageList.ClearRange, true, responseObject.pageRangesDiff);
    }

    var finalCallback = function (returnObject) {
      callback(returnObject.error, returnObject.pageRangesDiff, returnObject.response);
    };

    next(responseObject, finalCallback);
  };

  this.performRequest(webResource, null, options, processResponseCallback);
};

/**
* Clears a range of pages.
*
Expand Down
13 changes: 11 additions & 2 deletions lib/services/file/fileservice.js
Original file line number Diff line number Diff line change
Expand Up @@ -1589,8 +1589,17 @@ FileService.prototype.generateSharedAccessSignature = function (share, directory
* @return {string} The formatted URL string.
* @example
* var azure = require('azure-storage');
* var FileService = azure.createFileService();
* var url = FileService.getUrl(shareName, directoryName, fileName, sasToken, true);
* var fileService = azure.createFileService();
* var sharedAccessPolicy = {
* AccessPolicy: {
* Permissions: azure.FileUtilities.SharedAccessPermissions.READ,
* Start: startDate,
* Expiry: expiryDate
* },
* };
*
* var sasToken = fileService.generateSharedAccessSignature(shareName, directoryName, fileName, sharedAccessPolicy);
* var url = fileService.getUrl(shareName, directoryName, fileName, sasToken, true);
*/
FileService.prototype.getUrl = function (share, directory, file, sasToken, primary) {
validate.validateArgs('getUrl', function (v) {
Expand Down
Loading

0 comments on commit b12cd98

Please sign in to comment.