diff --git a/package.json b/package.json
index ce354b83c5..05d28b7c89 100644
--- a/package.json
+++ b/package.json
@@ -127,6 +127,7 @@
     "flow-bin": "^0.50.0",
     "husky": "^0.14.3",
     "intl": "^1.2.5",
+    "jsuri": "^1.3.1",
     "karma": "^1.7.0",
     "karma-chai": "^0.1.0",
     "karma-chai-sinon": "^0.1.5",
@@ -179,6 +180,7 @@
     "webpack-dev-server": "^2.5.1"
   },
   "peerDependencies": {
+    "jsuri": "^1.3.1",
     "lodash.clonedeep": "^4.5.0",
     "lodash.debounce": "^4.0.8",
     "lodash.noop": "^3.0.1",
diff --git a/src/api/Base.js b/src/api/Base.js
index a47a25569e..49ac709521 100644
--- a/src/api/Base.js
+++ b/src/api/Base.js
@@ -4,6 +4,7 @@
  * @author Box
  */
 
+import noop from 'lodash.noop';
 import Xhr from '../util/Xhr';
 import Cache from '../util/Cache';
 import { DEFAULT_HOSTNAME_API, DEFAULT_HOSTNAME_UPLOAD } from '../constants';
@@ -40,6 +41,16 @@
      */
     options: Options;
 
+    /**
+     * @property {Function}
+     */
+    consoleLog: Function;
+
+    /**
+     * @property {Function}
+     */
+    consoleError: Function;
+
     /**
      * [constructor]
      *
@@ -62,6 +73,8 @@
         });
         this.xhr = new Xhr(this.options);
         this.destroyed = false;
+        this.consoleLog = !!options.consoleLog && !!window.console ? window.console.log || noop : noop;
+        this.consoleError = !!options.consoleError && !!window.console ? window.console.error || noop : noop;
     }
 
     /**
diff --git a/src/api/uploads/BaseMultiput.js b/src/api/uploads/BaseMultiput.js
new file mode 100644
index 0000000000..bec2ca7916
--- /dev/null
+++ b/src/api/uploads/BaseMultiput.js
@@ -0,0 +1,80 @@
+/**
+ * @flow
+ * @file Multiput upload base class
+ * @author Box
+ */
+import Base from '../Base';
+import type { MultiputConfig, Options } from '../../flowTypes';
+
+const DEFAULT_MULTIPUT_CONFIG: MultiputConfig = {
+    digestReadahead: 5, // How many parts past those currently uploading to precompute digest for
+    initialRetryDelayMs: 5000, // Base for exponential backoff on retries
+    maxRetryDelayMs: 60000, // Upper bound for time between retries
+    parallelism: 5, // Maximum number of parts to upload at a time
+    requestTimeoutMs: 120000, // Idle timeout on part upload, overall request timeout on other requests
+    // eslint-disable-next-line max-len
+    retries: 5 // How many times to retry requests such as upload part or commit. Note that total number of attempts will be retries + 1 in worst case where all attempts fail.
+};
+
+class BaseMultiput extends Base {
+    config: MultiputConfig;
+    sessionEndpoints: Object;
+    canConsoleLog: boolean;
+
+    /**
+     * [constructor]
+     *
+     * @param {Options} options
+     * @param {Object} sessionEndpoints
+     * @param {MultiputConfig} [config]
+     * @return {void}
+     */
+    constructor(options: Options, sessionEndpoints: Object, config?: MultiputConfig): void {
+        super(options);
+
+        this.config = config || DEFAULT_MULTIPUT_CONFIG;
+        this.sessionEndpoints = sessionEndpoints;
+        this.canConsoleLog = !!options.consoleLog && !!window.console && !!window.console.log;
+    }
+
+    /**
+     * Console-logs the message returned by msgFunc
+     *
+     * @param {Function} msgFunc
+     * @return {void}
+     */
+    consoleLogFunc = (msgFunc: Function): void => {
+        if (!this.canConsoleLog) {
+            return;
+        }
+
+        this.consoleLog(msgFunc());
+    };
+
+    /**
+     * POST log event
+     *
+     * @param {string} eventType
+     * @param {string} [eventInfo]
+     * @return {Promise}
+     */
+    logEvent = (eventType: string, eventInfo?: string) => {
+        const data: {
+            event_type: string,
+            event_info?: string
+        } = {
+            event_type: eventType
+        };
+
+        if (eventInfo) {
+            data.event_info = eventInfo;
+        }
+
+        return this.xhr.post({
+            url: this.sessionEndpoints.logEvent,
+            data
+        });
+    };
+}
+
+export default BaseMultiput;
diff --git a/src/api/uploads/MultiputPart.js b/src/api/uploads/MultiputPart.js
index 89d305f742..ff579c62f6 100644
--- a/src/api/uploads/MultiputPart.js
+++ b/src/api/uploads/MultiputPart.js
@@ -3,7 +3,11 @@
  * @file Multiput upload part
  * @author Box
  */
-import Base from '../Base';
+import noop from 'lodash.noop';
+import BaseMultiput from './BaseMultiput';
+import type { MultiputConfig, Options, MultiputData } from '../../flowTypes';
+import { updateQueryParameters } from '../../util/url';
+import { getBoundedExpBackoffRetryDelay } from '../../util/uploads';
 
 const PART_STATE_NOT_STARTED: 0 = 0;
 const PART_STATE_COMPUTING_DIGEST: 1 = 1;
@@ -11,12 +15,12 @@ const PART_STATE_DIGEST_READY: 2 = 2;
 const PART_STATE_UPLOADING: 3 = 3;
 const PART_STATE_UPLOADED: 4 = 4;
 
-class MultiputPart extends Base {
+class MultiputPart extends BaseMultiput {
     index: number;
     numDigestRetriesPerformed: number;
     numUploadRetriesPerformed: number;
     offset: number;
-    sha256: ?string;
+    sha256: string;
     size: number;
     state:
         | typeof PART_STATE_NOT_STARTED
@@ -26,38 +30,291 @@
         | typeof PART_STATE_UPLOADED;
     timing: Object;
     uploadedBytes: number;
+    onProgress: Function;
+    onSuccess: Function;
+    onError: Function;
+    data: MultiputData;
+    config: MultiputConfig;
+    id: string;
+    retryTimeout: ?number;
+    blob: ?Blob;
+    rangeEnd: number;
+    startTimestamp: number;
+    getNumPartsUploading: Function;
 
     /**
      * [constructor]
      *
-     * @param {Object} options
+     * @param {Options} options
      * @param {number} index - 0-based index of this part in array of all parts
      * @param {number} offset - Starting byte offset of this part's range
      * @param {number} size - Size of this part in bytes
+     * @param {string} sessionId
+     * @param {Object} sessionEndpoints
+     * @param {MultiputConfig} config
+     * @param {Function} getNumPartsUploading
+     * @param {Function} [onSuccess]
+     * @param {Function} [onProgress]
+     * @param {Function} [onError]
      * @return {void}
      */
-    constructor(options: Object, index: number, offset: number, size: number): void {
-        super(options);
+    constructor(
+        options: Options,
+        index: number,
+        offset: number,
+        size: number,
+        sessionId: string,
+        sessionEndpoints: Object,
+        config: MultiputConfig,
+        getNumPartsUploading: Function,
+        onSuccess?: Function,
+        onProgress?: Function,
+        onError?: Function
+    ): void {
+        super(options, sessionEndpoints, config);
 
         this.index = index;
         this.numDigestRetriesPerformed = 0;
         this.numUploadRetriesPerformed = 0;
         this.offset = offset;
-        this.sha256 = null;
         this.size = size;
         this.state = PART_STATE_NOT_STARTED;
         this.timing = {};
         this.uploadedBytes = 0;
+        this.data = {};
+        this.config = config;
+        this.rangeEnd = offset + size - 1;
+        this.onSuccess = onSuccess || noop;
+        this.onError = onError || noop;
+        this.onProgress = onProgress || noop;
+        this.getNumPartsUploading = getNumPartsUploading;
     }
 
+    toJSON = () =>
+        JSON.stringify({
+            index: this.index,
+            offset: this.offset,
+            size: this.size,
+            state: this.state,
+            uploadedBytes: this.uploadedBytes,
+            numUploadRetriesPerformed: this.numUploadRetriesPerformed,
+            numDigestRetriesPerformed: this.numDigestRetriesPerformed,
+            sha256: this.sha256,
+            timing: this.timing
+        });
+
+    /**
+     * Returns file part information from the server after part upload is successful
+     *
+     * @return {Object}
+     */
+    getPart = (): Object => this.data.part || {};
+
+    /**
+     * Uploads this Part via the API. Will retry on network failures.
+     *
+     * @return {void}
+     */
+    upload = (): void => {
+        if (this.isDestroyed()) {
+            return;
+        }
+
+        if (!this.sha256) {
+            throw new Error('Part SHA-256 unavailable');
+        }
+
+        if (!this.blob) {
+            throw new Error('Part blob unavailable');
+        }
+
+        const clientEventInfo = {
+            documentHidden: document.hidden,
+            digest_retries: this.numDigestRetriesPerformed,
+            timing: this.timing,
+            parts_uploading: this.getNumPartsUploading()
+        };
+
+        const headers = {
+            'Content-Type': 'application/octet-stream',
+            Digest: `sha-256=${this.sha256}`,
+            'Content-Range': `bytes ${this.offset}-${this.rangeEnd}/${this.size}`,
+            'X-Box-Client-Event-Info': JSON.stringify(clientEventInfo)
+        };
+
+        this.startTimestamp = Date.now();
+
+        this.xhr.uploadFile({
+            url: this.sessionEndpoints.upload_part,
+            data: this.blob,
+            headers,
+            method: 'PUT',
+            successHandler: this.uploadSuccessHandler,
+            errorHandler: this.uploadErrorHandler,
+            progressHandler: this.uploadProgressHandler,
+            withIdleTimeout: true,
+            idleTimeoutDuration: this.config.requestTimeoutMs
+        });
+    };
+
+    /**
+     * Handler for upload part success
+     *
+     * @param {Object} data
+     * @return {void}
+     */
+    uploadSuccessHandler = (data: Object) => {
+        if (this.isDestroyed()) {
+            return;
+        }
+
+        this.state = PART_STATE_UPLOADED;
+        this.consoleLogFunc(() => `Upload completed: ${this.toJSON()}.`);
+        this.data = data;
+        this.blob = null;
+        this.timing.uploadTime = Date.now() - this.startTimestamp;
+
+        this.onSuccess(this);
+
+        this.uploadedBytes = this.size;
+    };
+
     /**
-     * Upload the part
-     * TODO: implement this
+     * Handler for upload part progress event
      *
-     * @public
+     * @param {ProgressEvent} event
      * @return {void}
      */
-    upload = (): void => {};
+    uploadProgressHandler = (event: ProgressEvent) => {
+        if (this.isDestroyed()) {
+            return;
+        }
+
+        const newUploadedBytes = parseInt(event.loaded, 10);
+        const prevUploadedBytes = this.uploadedBytes;
+        this.uploadedBytes = newUploadedBytes;
+
+        this.onProgress(prevUploadedBytes, newUploadedBytes);
+    };
+
+    /**
+     * Handler for upload part error
+     *
+     * @param {Error} error
+     * @return {void}
+     */
+    uploadErrorHandler = (error: Error) => {
+        if (this.isDestroyed()) {
+            return;
+        }
+
+        this.consoleLogFunc(
+            () => `Upload failure ${error.message} for part ${this.toJSON()}. XHR state: ${this.xhr.xhr.readyState}.`
+        );
+        const eventInfo = {
+            message: error.message,
+            part: {
+                uploadedBytes: this.uploadedBytes,
+                id: this.id,
+                index: this.index,
+                offset: this.offset
+            },
+            xhr_ready_state: this.xhr.xhr.readyState,
+            xhr_status_text: this.xhr.xhr.statusText
+        };
+        const eventInfoString = JSON.stringify(eventInfo);
+        this.logEvent('part_failure', eventInfoString);
+
+        if (this.numUploadRetriesPerformed >= this.config.retries) {
+            this.onError(error, eventInfoString);
+            return;
+        }
+
+        const retryDelayMs = getBoundedExpBackoffRetryDelay(
+            this.config.initialRetryDelayMs,
+            this.config.maxRetryDelayMs,
+            this.numUploadRetriesPerformed
+        );
+
+        this.numUploadRetriesPerformed += 1;
+        this.consoleLog(`Retrying uploading part ${this.toJSON()} in ${retryDelayMs} ms`);
+        this.retryTimeout = setTimeout(this.retryUpload, retryDelayMs);
+    };
+
+    /**
+     * Retry uploading part
+     *
+     * @return {Promise}
+     */
+    retryUpload = async (): Promise<any> => {
+        if (this.isDestroyed()) {
+            return;
+        }
+
+        try {
+            if (this.uploadedBytes < this.size) {
+                // Not all bytes were uploaded to the server. So upload part again.
+                throw new Error('Incomplete part.');
+            }
+
+            const parts = await this.listParts(this.index, 1);
+
+            if (parts && parts.length === 1 && parts[0].offset === this.offset && parts[0].part_id) {
+                this.consoleLog(`Part ${this.toJSON()} is available on server. Not re-uploading.`);
+                this.id = parts[0].part_id;
+                this.uploadSuccessHandler({
+                    part: parts[0]
+                });
+                return;
+            }
+            this.consoleLog(`Part ${this.toJSON()} is not available on server. Re-uploading.`);
+            throw new Error('Part not found on the server');
+        } catch (error) {
+            const { response } = error;
+            if (response && response.status) {
+                this.consoleLog(`Error ${response.status} while listing part ${this.toJSON()}. Re-uploading.`);
+            }
+            this.numUploadRetriesPerformed += 1;
+            this.upload();
+        }
+    };
+
+    /**
+     * Cancels upload for this Part.
+     *
+     * @return {void}
+     */
+    cancel(): void {
+        if (this.xhr && typeof this.xhr.abort === 'function') {
+            this.xhr.abort();
+        }
+
+        clearTimeout(this.retryTimeout);
+        this.blob = null;
+        this.data = {};
+        this.destroy();
+    }
+
+    /**
+     * List specified parts
+     *
+     * @param {number} partIndex - Index of starting part
+     * @param {number} limit - Number of parts to be listed
+     * @return {Promise<Array<Object>>} Array of parts
+     */
+    listParts = async (partIndex: number, limit: number): Promise<Array<Object>> => {
+        const params = {
+            offset: partIndex,
+            limit
+        };
+
+        const endpoint = updateQueryParameters(this.sessionEndpoints.listParts, params);
+        const response = await this.xhr.get({
+            url: endpoint
+        });
+
+        return response.entries;
+    };
 }
 
 export default MultiputPart;
diff --git a/src/api/uploads/MultiputUpload.js b/src/api/uploads/MultiputUpload.js
index 3d8390f813..71b83f362f 100644
--- a/src/api/uploads/MultiputUpload.js
+++ b/src/api/uploads/MultiputUpload.js
@@ -4,27 +4,34 @@
  * @author Box
  */
 
-import Base from '../Base';
-import { getFileLastModifiedAsISONoMSIfPossible } from '../../util/uploads';
+import noop from 'lodash.noop';
+import BaseMultiput from './BaseMultiput';
+import { getFileLastModifiedAsISONoMSIfPossible, getBoundedExpBackoffRetryDelay } from '../../util/uploads';
+import { retryNumOfTimes } from '../../util/function';
+import { digest } from '../../util/webcrypto';
 import createWorker from '../../util/uploadsSHA1Worker';
-import MultiputPart, { PART_STATE_UPLOADED, PART_STATE_DIGEST_READY } from './MultiputPart';
-import type { StringAnyMap, MultiputConfig } from '../../flowTypes';
-
-const DEFAULT_MULTIPUT_CONFIG: MultiputConfig = {
-    console: false, // Whether to display informational messages to console
-    digestReadahead: 5, // How many parts past those currently uploading to precompute digest for
-    initialRetryDelayMs: 5000, // Base for exponential backoff on retries
-    maxRetryDelayMs: 60000, // Upper bound for time between retries
-    parallelism: 5, // Maximum number of parts to upload at a time
-    requestTimeoutMs: 120000, // Idle timeout on part upload, overall request timeout on other requests
-    // eslint-disable-next-line max-len
-    retries: 5 // How many times to retry requests such as upload part or commit. Note that total number of attempts will be retries + 1 in worst case where all attempts fail.
-};
-
-class MultiputUpload extends Base {
+import MultiputPart, { PART_STATE_UPLOADED, PART_STATE_DIGEST_READY, PART_STATE_NOT_STARTED } from './MultiputPart';
+import type { StringAnyMap, MultiputConfig, Options } from '../../flowTypes';
+
+// Constants used for specifying log event types.
+/* eslint-disable no-unused-vars */
+const LOG_EVENT_TYPE_COMMIT_CONFLICT = 'commit_conflict';
+// This type is a catch-all for create session errors that aren't 5xx's (for which we'll do
+// retries) and aren't specific 4xx's we know how to handle (e.g. out of storage).
+const LOG_EVENT_TYPE_CREATE_SESSION_MISC_ERROR = 'create_session_misc_error';
+const LOG_EVENT_TYPE_CREATE_SESSION_RETRIES_EXCEEDED = 'create_session_retries_exceeded';
+const LOG_EVENT_TYPE_FILE_CHANGED_DURING_UPLOAD = 'file_changed_during_upload';
+const LOG_EVENT_TYPE_PART_UPLOAD_RETRIES_EXCEEDED = 'part_upload_retries_exceeded';
+const LOG_EVENT_TYPE_COMMIT_RETRIES_EXCEEDED = 'commit_retries_exceeded';
+const LOG_EVENT_TYPE_WEB_WORKER_ERROR = 'web_worker_error';
+const LOG_EVENT_TYPE_FILE_READER_RECEIVED_NOT_FOUND_ERROR = 'file_reader_received_not_found_error';
+const LOG_EVENT_TYPE_PART_DIGEST_RETRIES_EXCEEDED = 'part_digest_retries_exceeded';
+const LOG_EVENT_TYPE_LOGGED_OUT = 'logged_out';
+/* eslint-enable no-unused-vars */
+
+class MultiputUpload extends BaseMultiput {
     clientId: ?string;
     commitRetryCount: number;
-    config: Object;
     createSessionNumRetriesPerformed: number;
     destinationFileId: ?string;
     destinationFolderId: ?string;
@@ -33,10 +40,10 @@
     firstUnuploadedPartIndex: number;
     initialFileLastModified: ?string;
     initialFileSize: number;
-    onCompleted: ?Function;
-    onFailure: ?Function;
-    onProgress: ?Function;
-    options: Object;
+    onSuccess: Function;
+    onError: Function;
+    onProgress: Function;
+    options: Options;
     partSize: number;
     parts: Array<MultiputPart>;
     numPartsDigestComputing: number;
@@ -45,29 +52,47 @@
     numPartsUploaded: number;
     numPartsUploading: number;
     sessionEndpoints: Object;
-    sessionId: ?string;
+    sessionId: string;
     totalUploadedBytes: number;
     worker: Worker;
+    createSessionTimeout: ?number;
 
     /**
      * [constructor]
      *
-     * @param {Object} options
+     * @param {Options} options
      * @param {File} file
      * @param {string} createSessionUrl
      * @param {string} [destinationFolderId] - Untyped folder id (e.g. no "d_" prefix)
      * @param {string} [destinationFileId] - Untyped file id (e.g. no "f_" prefix)
      * @param {MultiputConfig} [config]
+     * @param {Function} [onError]
+     * @param {Function} [onProgress]
+     * @param {Function} [onSuccess]
      */
     constructor(
-        options: Object,
+        options: Options,
         file: File,
         createSessionUrl: string,
         destinationFolderId?: ?string,
         destinationFileId?: ?string,
-        config?: MultiputConfig
+        config?: MultiputConfig,
+        onError?: Function,
+        onProgress?: Function,
+        onSuccess?: Function
     ) {
-        super(options);
+        super(
+            options,
+            {
+                createSession: createSessionUrl,
+                uploadPart: null,
+                listParts: null,
+                commit: null,
+                abort: null,
+                logEvent: null
+            },
+            config
+        );
 
         if (destinationFolderId !== null && destinationFileId !== null) {
             throw new Error('Both destinationFolderId and destinationFileId set');
@@ -78,19 +103,8 @@
         // a file change during the upload.
         this.initialFileSize = this.file.size;
         this.initialFileLastModified = getFileLastModifiedAsISONoMSIfPossible(this.file);
-
-        this.sessionEndpoints = {
-            createSession: createSessionUrl,
-            uploadPart: null,
-            listParts: null,
-            commit: null,
-            abort: null,
-            logEvent: null
-        };
-
         this.destinationFolderId = destinationFolderId;
         this.destinationFileId = destinationFileId;
-        this.config = config || DEFAULT_MULTIPUT_CONFIG;
         this.fileSha1 = null;
         this.totalUploadedBytes = 0;
         this.numPartsNotStarted = 0; // # of parts yet to be processed
@@ -99,9 +113,9 @@
         this.numPartsUploading = 0; // # of parts with upload requests currently inflight
         this.numPartsUploaded = 0; // # of parts successfully uploaded
         this.firstUnuploadedPartIndex = 0; // Index of first part that hasn't been uploaded yet.
-        this.onProgress = null;
-        this.onCompleted = null;
-        this.onFailure = null;
+        this.onProgress = onProgress || noop;
+        this.onSuccess = onSuccess || noop;
+        this.onError = onError || noop;
         this.createSessionNumRetriesPerformed = 0;
         this.partSize = 0;
         this.parts = [];
@@ -139,22 +153,73 @@
 
         try {
             const data = await this.xhr.post({ url: this.sessionEndpoints.createSession, data: postData });
-            this.uploadSessionSuccessHandler(data);
+            this.createUploadSessionSuccessHandler(data);
         } catch (error) {
-            this.uploadSessionErrorHandler(error);
+            const { response } = error;
+
+            if (response && response.status >= 500 && response.status < 600) {
+                this.createUploadSessionErrorHandler(response);
+                return;
+            }
+
+            // Recover from 409 session_conflict. The server will return the session information
+            // in context_info, so treat it as a success.
+            if (response && response.status === 409 && response.code === 'session_conflict') {
+                this.createUploadSessionSuccessHandler(response.context_info.session);
+                return;
+            }
+
+            if (
+                response &&
+                ((response.status === 403 && response.code === 'storage_limit_exceeded') ||
+                    (response.status === 403 && response.code === 'access_denied_insufficient_permissions') ||
+                    (response.status === 409 && response.code === 'item_name_in_use'))
+            ) {
+                this.onError(response);
+                return;
+            }
+
+            // All other cases get treated as an upload failure.
+            this.sessionErrorHandler(response, LOG_EVENT_TYPE_CREATE_SESSION_MISC_ERROR, JSON.stringify(response));
         }
     };
 
     /**
-     * Upload session session error handler.
+     * Create session error handler.
      * Retries the create session request or fails the upload.
-     * TODO: implement this
      *
      * @private
+     * @param {Object} response
      * @return {void}
      */
-    // eslint-disable-next-line
-    uploadSessionErrorHandler = (error: Error): void => {};
+    createUploadSessionErrorHandler = (response: Object): void => {
+        if (this.isDestroyed()) {
+            return;
+        }
+
+        if (this.createSessionNumRetriesPerformed < this.config.retries) {
+            this.createUploadSessionRetry();
+            return;
+        }
+
+        this.consoleLog('Too many create session failures, failing upload');
+        this.sessionErrorHandler(response, LOG_EVENT_TYPE_CREATE_SESSION_RETRIES_EXCEEDED, JSON.stringify(response));
+    };
+
+    /**
+     * Schedule a retry for create session request upon failure
+     *
+     * @private
+     * @return {void}
+     */
+    createUploadSessionRetry = (): void => {
+        const retryDelayMs = getBoundedExpBackoffRetryDelay(
+            this.config.initialRetryDelayMs,
+            this.config.maxRetryDelayMs,
+            this.createSessionNumRetriesPerformed
+        );
+        this.createSessionNumRetriesPerformed += 1;
+        this.consoleLog(`Retrying create session in ${retryDelayMs} ms`);
+        this.createSessionTimeout = setTimeout(this.createUploadSession, retryDelayMs);
+    };
 
     /**
      * Handles an upload session success response
      *
      * @private
@@ -163,7 +228,7 @@
      * @param {Object} data - Upload session creation success data
      * @return {void}
      */
-    uploadSessionSuccessHandler = (data: any): void => {
+    createUploadSessionSuccessHandler = (data: any): void => {
         if (this.isDestroyed()) {
             return;
         }
@@ -185,6 +250,94 @@
         this.processNextParts();
     };
 
+    /**
+     * Session error handler.
+     * Fails the upload, then attempts to log the failure and abort the session.
+     *
+     * @private
+     * @param {?Object} response
+     * @param {string} logEventType
+     * @param {string} [logMessage]
+     * @return {Promise}
+     */
+    sessionErrorHandler = async (response: ?Object, logEventType: string, logMessage?: string): Promise<any> => {
+        this.destroy();
+        this.onError(response);
+
+        try {
+            await retryNumOfTimes(
+                (resolve: Function, reject: Function): void => {
+                    this.logEvent(logEventType, logMessage).then(resolve).catch(reject);
+                },
+                this.config.retries,
+                this.config.initialRetryDelayMs
+            );
+
+            this.abortSession();
+        } catch (err) {
+            this.abortSession();
+        }
+    };
+
+    /**
+     * Aborts the upload session
+     *
+     * @private
+     * @return {void}
+     */
+    abortSession = (): void => {
+        if (this.worker) {
+            this.worker.terminate();
+        }
+
+        if (this.sessionEndpoints.abort) {
+            this.xhr.delete({ url: this.sessionEndpoints.abort });
+        }
+    };
+
+    /**
+     * Part upload success handler
+     *
+     * @private
+     * @param {MultiputPart} part
+     * @return {void}
+     */
+    partUploadSuccessHandler = (part: MultiputPart): void => {
+        this.numPartsUploading -= 1;
+        this.numPartsUploaded += 1;
+        this.updateProgress(part.uploadedBytes, part.size);
+        this.processNextParts();
+    };
+
+    /**
+     * Part upload error handler
+     *
+     * @private
+     * @param {Error} error
+     * @param {string} eventInfo
+     * @return {void}
+     */
+    partUploadErrorHandler = (error: Error, eventInfo: string): void => {
+        this.sessionErrorHandler(null, LOG_EVENT_TYPE_PART_UPLOAD_RETRIES_EXCEEDED, eventInfo);
+    };
+
+    /**
+     * Update upload progress
+     *
+     * @private
+     * @param {number} prevUploadedBytes
+     * @param {number} newUploadedBytes
+     * @return {void}
+     */
+    updateProgress = (prevUploadedBytes: number, newUploadedBytes: number): void => {
+        if (this.isDestroyed()) {
+            return;
+        }
+
+        this.totalUploadedBytes += newUploadedBytes - prevUploadedBytes;
+        this.onProgress(this.totalUploadedBytes);
+    };
+
     /**
      * Attempts to process more parts, except in the case where everything is done or we detect
      * a file change (in which case we want to abort and not process more parts).
@@ -208,9 +361,127 @@
             this.uploadNextPart();
         }
 
-        // TODO: compute digests for parts
+        if (this.shouldComputeDigestForNextPart()) {
+            this.computeDigestForNextPart();
+        }
     };
 
+    /**
+     * We compute digest for parts one at a time. This is done for simplicity and also to guarantee that
+     * we send parts in order to the web worker (which is computing the digest for the entire file).
+     *
+     * @private
+     * @return {boolean} true if there is work to do, false otherwise.
+     */
+    shouldComputeDigestForNextPart = (): boolean =>
+        !this.isDestroyed() &&
+        this.numPartsDigestComputing === 0 &&
+        this.numPartsNotStarted > 0 &&
+        this.numPartsDigestReady < this.config.digestReadahead;
+
+    /**
+     * Find first part in parts array that doesn't have a digest, and compute its digest.
+     *
+     * @private
+     * @return {void}
+     */
+    computeDigestForNextPart = (): void => {
+        for (let i = this.firstUnuploadedPartIndex; i < this.parts.length; i += 1) {
+            const part = this.parts[i];
+            if (part.state === PART_STATE_NOT_STARTED) {
+                // Update the counters here instead of computeDigestForPart because computeDigestForPart
+                // can get called on retries
+                this.numPartsNotStarted -= 1;
+                this.numPartsDigestComputing += 1;
+                this.computeDigestForPart(part);
+                return;
+            }
+        }
+    };
 
+    /**
+     * Read a blob with FileReader
+     *
+     * @param {FileReader} reader
+     * @param {Blob} blob
+     * @return {Promise}
+     */
+    readFile = (reader: FileReader, blob: Blob): Promise<any> =>
+        new Promise((resolve, reject) => {
+            reader.readAsArrayBuffer(blob);
+            reader.onload = () => {
+                resolve({
+                    buffer: reader.result,
+                    readCompleteTimestamp: Date.now()
+                });
+            };
+            reader.onerror = reject;
+        });
+
+    /**
+     * Compute digest for this part
+     *
+     * @private
+     * @param {MultiputPart} part
+     * @return {Promise}
+     */
+    computeDigestForPart = async (part: MultiputPart): Promise<any> => {
+        const blob = this.file.slice(part.offset, part.offset + part.size);
+        const reader = new window.FileReader();
+        const startTimestamp = Date.now();
+
+        try {
+            const {
+                buffer,
+                readCompleteTimestamp
+            }: { buffer: ArrayBuffer, readCompleteTimestamp: number } = await this.readFile(reader, blob);
+            const sha256 = await digest('SHA-256', buffer);
+
+            this.sendPartToWorker(part, buffer);
+
+            part.sha256 = sha256;
+            part.state = PART_STATE_DIGEST_READY;
+            part.blob = blob;
+
+            this.numPartsDigestReady += 1;
+            const digestCompleteTimestamp = Date.now();
+
+            part.timing = {
+                partDigestTime: digestCompleteTimestamp - startTimestamp,
+                readTime: readCompleteTimestamp - startTimestamp,
+                subtleCryptoTime: digestCompleteTimestamp - readCompleteTimestamp
+            };
+
+            this.processNextParts();
+        } catch (error) {
+            this.onPartDigestError(error);
+        }
+    };
+
+    /**
+     * Sends a part to the worker
+     *
+     * @private
+     * @param {MultiputPart} part
+     * @param {ArrayBuffer} buffer
+     * @return {void}
+     * TODO: implement this
+     */
+    // eslint-disable-next-line
+    sendPartToWorker = (part: MultiputPart, buffer: ArrayBuffer): void => {};
+
+    /**
+     * Error handler for part digest computation
+     *
+     * @private
+     * @param {Error} error
+     * @return {void}
+     * TODO: implement this
+     */
+    // eslint-disable-next-line
+    onPartDigestError = (error: Error): void => {};
+
     /**
      * Send a request to commit the upload.
      * TODO: implement this
@@ -240,6 +511,16 @@
      */
     commitSessionErrorHandler = (): void => {};
 
+    /**
+     * Retry commit.
+     * Retries the commit or fails the multiput session.
+     * TODO: implement this
+     *
+     * @private
+     * @return {void}
+     */
+    commitSessionRetry = (): void => {};
+
     /**
      * Find first part in parts array that we can upload, and upload it.
      *
@@ -285,6 +566,13 @@
         }
     };
 
+    /**
+     * Get number of parts being uploaded
+     *
+     * @return {number}
+     */
+    getNumPartsUploading = (): number => this.numPartsUploading;
+
     /**
      * After session is created and we know the part size, populate the parts
     * array.
@@ -301,7 +589,14 @@
                 this.options,
                 i,
                 offset,
-                Math.min(offset + this.partSize, this.file.size) - offset
+                Math.min(offset + this.partSize, this.file.size) - offset,
+                this.sessionId,
+                this.sessionEndpoints,
+                this.config,
+                this.getNumPartsUploading,
+                this.partUploadSuccessHandler,
+                this.updateProgress,
+                this.partUploadErrorHandler
             );
             this.parts.push(part);
         }
diff --git a/src/api/uploads/__tests__/BaseMultiput-test.js b/src/api/uploads/__tests__/BaseMultiput-test.js
new file mode 100644
index 0000000000..0a982fb97f
--- /dev/null
+++ b/src/api/uploads/__tests__/BaseMultiput-test.js
@@ -0,0 +1,75 @@
+/* eslint-disable no-unused-expressions, no-underscore-dangle */
+import { withData } from 'leche';
+
+import BaseMultiput from '../BaseMultiput';
+
+const sandbox = sinon.sandbox.create();
+
+describe('api/BaseMultiput', () => {
+    let BaseMultiputTest;
+    beforeEach(() => {
+        BaseMultiputTest = new BaseMultiput(
+            {
+                consoleLog: true
+            },
+            {},
+            {}
+        );
+    });
+
+    afterEach(() => {
+        sandbox.verifyAndRestore();
+    });
+
+    describe('logEvent()', () => {
+        const event_type = 'event_type';
+        const event_info = 'event_info';
+
+        withData(
+            [
+                [
+                    null,
+                    {
+                        event_type
+                    }
+                ],
+                [
+                    event_info,
+                    {
+                        event_type,
+                        event_info
+                    }
+                ]
+            ],
+            (eventInfo, expectedData) => {
+                it('should POST to the correct endpoint', async () => {
+                    BaseMultiputTest.sessionEndpoints.logEvent = 'logEvent';
+                    BaseMultiputTest.xhr.post = sandbox
+                        .mock()
+                        .withArgs({
+                            url: 'logEvent',
+                            data: expectedData
+                        })
+                        .returns('expected');
+
+                    assert.equal(await BaseMultiputTest.logEvent(event_type, eventInfo), 'expected');
+                });
+            }
+        );
+    });
+
+    describe('consoleLogFunc()', () => {
+        it('should not call msgFunc when canConsoleLog is false', async () => {
+            BaseMultiputTest.canConsoleLog = false;
+
+            BaseMultiputTest.consoleLogFunc(sandbox.mock().never());
+        });
+
+        it('should console log the return value of msgFunc when canConsoleLog is true', async () => {
+            BaseMultiputTest.canConsoleLog = true;
+            BaseMultiputTest.consoleLog = sandbox.mock().withArgs('expected');
+
+            BaseMultiputTest.consoleLogFunc(sandbox.mock().returns('expected'));
+        });
+    });
+});
diff --git a/src/api/uploads/__tests__/MultiputPart-test.js b/src/api/uploads/__tests__/MultiputPart-test.js
new file mode 100644
index 0000000000..0ed68f0e36
--- /dev/null
+++ b/src/api/uploads/__tests__/MultiputPart-test.js
@@ -0,0 +1,273 @@
+/* eslint-disable no-unused-expressions, no-underscore-dangle */
+import { withData } from 'leche';
+import MultiputPart, { PART_STATE_UPLOADED } from '../MultiputPart';
+
+const sandbox = sinon.sandbox.create();
+
+describe('api/MultiputPart', () => {
+    const options = {};
+    const index = 0;
+    const offset = 0;
+    const size = 1;
+    const sessionId = 1;
+    const sessionEndpoints = {};
+    const config = {};
+    const getNumPartsUploading = sandbox.stub();
+    let MultiputPartTest;
+    beforeEach(() => {
+        MultiputPartTest = new MultiputPart(
+            options,
+            index,
+            offset,
+            size,
+            sessionId,
+            sessionEndpoints,
+            config,
+            getNumPartsUploading
+        );
+    });
+
+    afterEach(() => {
+        sandbox.verifyAndRestore();
+    });
+
+    describe('upload()', () => {
+        it('should throw error if sha256 is not available', () => {
+            MultiputPartTest.destroyed = false;
+            MultiputPartTest.blob = {};
+
+            MultiputPartTest.xhr.uploadFile = sandbox.mock().never();
+
+            assert.throws(() => {
+                MultiputPartTest.upload();
+            });
+        });
+
+        it('should throw error if blob is not available', () => {
+            MultiputPartTest.destroyed = false;
+            MultiputPartTest.sha256 = '123';
+
+            MultiputPartTest.xhr.uploadFile = sandbox.mock().never();
+
+            assert.throws(() => {
+                MultiputPartTest.upload();
+            });
+        });
+
+        it('should upload file properly', () => {
+            MultiputPartTest.destroyed = false;
+            MultiputPartTest.sha256 = '123';
+            MultiputPartTest.blob = {};
+            MultiputPartTest.xhr.uploadFile = sandbox.mock();
+
+            MultiputPartTest.upload();
+        });
+    });
+
+    describe('uploadSuccessHandler()', () => {
+        it('should noop if destroyed', () => {
+            MultiputPartTest.destroyed = true;
+
+            MultiputPartTest.onSuccess = sandbox.mock().never();
+
+            MultiputPartTest.uploadSuccessHandler();
+        });
+
+        it('should call onSuccess and update attributes properly', () => {
+            const data = { hi: 1 };
+            MultiputPartTest.destroyed = false;
+            MultiputPartTest.onSuccess = sandbox.mock();
+
+            MultiputPartTest.uploadSuccessHandler(data);
+
+            assert.equal(MultiputPartTest.data, data);
+            assert.isNull(MultiputPartTest.blob);
+            assert.equal(MultiputPartTest.state, PART_STATE_UPLOADED);
+        });
+    });
+
+    describe('uploadProgressHandler()', () => {
+        it('should noop if destroyed', () => {
+            MultiputPartTest.destroyed = true;
+
+            MultiputPartTest.onSuccess = sandbox.mock().never();
+
+            MultiputPartTest.uploadProgressHandler();
+        });
+
+        it('should call onProgress and update attributes properly', () => {
+            const event = { loaded: 1 };
+            MultiputPartTest.destroyed = false;
+            MultiputPartTest.onProgress = sandbox.mock();
+
+            MultiputPartTest.uploadProgressHandler(event);
+
+            assert.equal(MultiputPartTest.uploadedBytes, 1);
+        });
+    });
+
+    describe('uploadErrorHandler()', () => {
+        beforeEach(() => {
+            MultiputPartTest.xhr = {
+                xhr: {
+                    readyState: 'readyState',
+                    statusText: 'statusText'
+                }
+            };
+        });
+        it('should noop if destroyed', () => {
+            MultiputPartTest.destroyed = true;
+
+            MultiputPartTest.onSuccess = sandbox.mock().never();
+
+            MultiputPartTest.uploadErrorHandler();
+        });
+
+        it('should log error, and call onError when retry is exhausted', () => {
+            const error = { message: 'no' };
+            MultiputPartTest.destroyed = false;
+            MultiputPartTest.numUploadRetriesPerformed = 100;
+            MultiputPartTest.config.retries = 1;
+            MultiputPartTest.logEvent = sandbox.mock();
+            MultiputPartTest.onError = sandbox.mock();
+
+            MultiputPartTest.uploadErrorHandler(error);
+        });
+
+        it('should retry upload after delay when retry is not exhausted', () => {
+            MultiputPart.__Rewire__('getBoundedExpBackoffRetryDelay', sandbox.mock().returns(10));
+            const error = { message: 'no' };
+            MultiputPartTest.destroyed = false;
+            MultiputPartTest.numUploadRetriesPerformed = 100;
+            MultiputPartTest.config.retries = 1000;
+            MultiputPartTest.logEvent = sandbox.mock();
+            MultiputPartTest.onError = sandbox.mock().never();
+
+            MultiputPartTest.uploadErrorHandler(error);
+            setTimeout(() => {
+                sandbox.mock(MultiputPartTest).expects('retryUpload');
+                assert.equal(MultiputPartTest.numUploadRetriesPerformed, 101);
+            }, 100);
+            MultiputPart.__ResetDependency__('getBoundedExpBackoffRetryDelay');
+        });
+    });
+
+    describe('retryUpload()', () => {
+        it('should noop if destroyed', () => {
+            MultiputPartTest.destroyed = true;
+
+            MultiputPartTest.onSuccess = sandbox.mock().never();
+
+            MultiputPartTest.retryUpload();
+        });
+
+        it('should call upload when upload is incomplete', async () => {
+            MultiputPartTest.destroyed = false;
+            MultiputPartTest.uploadedBytes = 1;
+            MultiputPartTest.size = 100;
+            MultiputPartTest.numUploadRetriesPerformed = 0;
+            MultiputPartTest.upload = sandbox.mock();
+
+            await MultiputPartTest.retryUpload();
+
+            assert.equal(MultiputPartTest.numUploadRetriesPerformed, 1);
+        });
+
+        it('should call uploadSuccessHandler when upload is already available on the server', async () => {
+            const part = {
+                offset: 1,
+                part_id: 1
+            };
+            const parts = [part];
+
+            MultiputPartTest.destroyed = false;
+            MultiputPartTest.uploadedBytes = 100;
+            MultiputPartTest.size = 100;
+            MultiputPartTest.offset = 1;
+            MultiputPartTest.numUploadRetriesPerformed = 0;
+            MultiputPartTest.upload = sandbox.mock().never();
+            MultiputPartTest.uploadSuccessHandler = sandbox.mock().withArgs({
+                part
+            });
+            MultiputPartTest.listParts = sandbox.mock().resolves(parts);
+
+            await MultiputPartTest.retryUpload();
+        });
+
+        withData(
+            [
+                [
+                    {
+                        offset: 1,
+                        part_id: 1
+                    },
+                    {
+                        offset: 1,
+                        part_id: 1
+                    }
+                ],
+                [
+                    {
+                        offset: 1
+                    }
+                ],
+                [
+                    {
+                        offset: 2,
+                        part_id: 1
+                    }
+                ]
+            ],
+            (parts) => {
+                it('should call upload when upload is not available on the server', async () => {
+                    MultiputPartTest.destroyed = false;
+                    MultiputPartTest.uploadedBytes = 100;
+                    MultiputPartTest.size = 100;
+                    MultiputPartTest.numUploadRetriesPerformed = 0;
+                    MultiputPartTest.upload = sandbox.mock();
+                    MultiputPartTest.uploadSuccessHandler = sandbox.mock().never();
+                    MultiputPartTest.listParts = sandbox.mock().resolves(parts);
+
+                    await MultiputPartTest.retryUpload();
+
+                    assert.equal(MultiputPartTest.numUploadRetriesPerformed, 1);
+                });
+            }
+        );
+    });
+
+    describe('cancel()', () => {
+        it('should tear down properly', () => {
+            MultiputPartTest.xhr = {
+                abort: sandbox.mock()
+            };
+            MultiputPartTest.blob = new Blob();
+            MultiputPartTest.data = { hi: 1 };
+            MultiputPartTest.destroy = sandbox.mock();
+
+            MultiputPartTest.cancel();
+
+            assert.isNull(MultiputPartTest.blob);
+            assert.deepEqual(MultiputPartTest.data, {});
+        });
+    });
+
+    describe('listParts()', () => {
+        it('should GET from correct endpoint and return entries', async () => {
+            const endpoint = 'www.box.com';
+            const entries = [1];
+            MultiputPart.__Rewire__('updateQueryParameters', sandbox.mock().returns(endpoint));
+            MultiputPartTest.xhr = {
+                get: sandbox.mock().resolves({
+                    entries
+                })
+            };
+
+            const res = await MultiputPartTest.listParts(1, 1);
+
+            assert.equal(res, entries);
+
+            MultiputPart.__ResetDependency__('updateQueryParameters');
+        });
+    });
+});
diff --git a/src/api/uploads/__tests__/MultiputUpload-test.js b/src/api/uploads/__tests__/MultiputUpload-test.js
index 01e2a64a78..d140cb5afd 100644
--- a/src/api/uploads/__tests__/MultiputUpload-test.js
+++ b/src/api/uploads/__tests__/MultiputUpload-test.js
@@ -5,7 +5,8 @@
 import MultiputUpload from '../MultiputUpload';
 import MultiputPart, {
     PART_STATE_UPLOADED,
     PART_STATE_COMPUTING_DIGEST,
-    PART_STATE_DIGEST_READY
+    PART_STATE_DIGEST_READY,
+    PART_STATE_NOT_STARTED
 } from '../MultiputPart';
 
 const sandbox = sinon.sandbox.create();
@@ -39,9 +40,9 @@
 
             // Expectations
             const expectedParts = [
-                new MultiputPart(config, 0, 0, 400000),
-                new MultiputPart(config, 1, 400000, 400000),
-                new MultiputPart(config, 2, 800000, 200000)
+                new MultiputPart(config, 0, 0, 400000, 1, { upload_part: 'www.box.com' }),
+                new MultiputPart(config, 1, 400000, 400000, 1, { upload_part: 'www.box.com' }),
+                new MultiputPart(config, 2, 800000, 200000, 1, { upload_part: 'www.box.com' })
             ];
 
             // Execute
@@ -62,9 +63,9 @@
             multiputUploadTest.firstUnuploadedPartIndex = 0;
             multiputUploadTest.numPartsUploading = 0;
             multiputUploadTest.parts = [
-                new MultiputPart(config, 0, 0, 1024),
-                new MultiputPart(config, 1, 1024, 1024),
-                new MultiputPart(config, 2, 2048, 1024)
+                new MultiputPart(config, 0, 0, 1024, 1, { upload_part: 'www.box.com' }),
+                new MultiputPart(config, 1, 1024, 1024, 1, { upload_part: 'www.box.com' }),
+                new MultiputPart(config, 2, 2048, 1024, 1, { upload_part: 'www.box.com' })
             ];
         });
@@ -134,9 +135,9 @@
         beforeEach(() => {
             multiputUploadTest.firstUnuploadedPartIndex = 0;
             multiputUploadTest.parts = [
-                new MultiputPart(config, 0, 0, 1024),
-                new MultiputPart(config, 1, 1024, 1024),
-                new MultiputPart(config, 2, 2048, 1024)
+                new MultiputPart(config, 0, 0, 1024, 1, { upload_part: 'www.box.com' }),
+                new MultiputPart(config, 1, 1024, 1024, 1, { upload_part: 'www.box.com' }),
+                new MultiputPart(config, 2, 2048, 1024, 1, { upload_part: 'www.box.com' })
             ];
         });
@@ -203,9 +204,9 @@
 
             // Expectations
             const expectedParts = [
-                new MultiputPart(config, 0, 0, 400000),
-                new MultiputPart(config, 1, 400000, 400000),
-                new MultiputPart(config, 2, 800000, 200000)
+                new MultiputPart(config, 0, 0, 400000, 1, { upload_part: 'www.box.com' }),
+                new MultiputPart(config, 1, 400000, 400000, 1, { upload_part: 'www.box.com' }),
+                new MultiputPart(config, 2, 800000, 200000, 1, { upload_part: 'www.box.com' })
             ];
 
             // Execute
@@ -221,7 +222,7 @@
         });
     });
 
-    describe('uploadSessionSuccessHandler()', () => {
+    describe('createUploadSessionSuccessHandler()', () => {
         const data = {
             id: 1,
             part_size: 1,
@@ -243,7 +244,7 @@
             sandbox.mock(multiputUploadTest).expects('processNextParts').never();
 
             // Execute
-            multiputUploadTest.uploadSessionSuccessHandler(data);
+            multiputUploadTest.createUploadSessionSuccessHandler(data);
         });
 
         it('should update attributes properly, populate parts and process parts when not destroyed', () => {
@@ -257,7 +258,7 @@
             sandbox.mock(multiputUploadTest).expects('processNextParts');
 
             // Execute
-            multiputUploadTest.uploadSessionSuccessHandler(data);
+            multiputUploadTest.createUploadSessionSuccessHandler(data);
 
             // Verify
             assert.equal(multiputUploadTest.sessionId, data.id);
@@ -281,25 +282,342 @@
             await multiputUploadTest.createUploadSession();
         });
 
-        it('should call uploadSessionSuccessHandler when the session is created successfully', async () => {
+        it('should call createUploadSessionSuccessHandler when the session is created successfully', async () => {
             const data = { a: 2 };
 
             multiputUploadTest.destroyed = false;
             multiputUploadTest.xhr.post = sandbox.mock().resolves(data);
-            multiputUploadTest.uploadSessionSuccessHandler = sandbox.mock().withArgs(data);
+            multiputUploadTest.createUploadSessionSuccessHandler = sandbox.mock().withArgs(data);
 
             await multiputUploadTest.createUploadSession();
         });
 
-        it('should call uploadSessionErrorHandler when the session creation failed', async () => {
-            const error = { no: 2 };
+        it('should call createUploadSessionErrorHandler when the session creation failed', async () => {
+            const error = {
+                response: {
+                    status: 500
+                }
+            };
 
             multiputUploadTest.destroyed = false;
             multiputUploadTest.xhr.post = sandbox.mock().rejects(error);
-            multiputUploadTest.uploadSessionErrorHandler = sandbox.mock().withArgs(error);
+            multiputUploadTest.createUploadSessionErrorHandler = sandbox.mock().withArgs(error.response);
 
             await multiputUploadTest.createUploadSession();
         });
+
+        it('should invoke createUploadSessionSuccessHandler on 409 session_conflict', async () => {
+            // Setup
+            const response = {
+                status: 409,
+                code: 'session_conflict',
+                context_info: {
+                    session: {
+                        part_size: 1234,
+                        session_endpoints: {
+                            upload_part: 'testUploadPartUrl',
+                            commit: 'testCommitUrl',
+                            abort: 'testAbortUrl'
+                        }
+                    }
+                }
+            };
+
+            // Expectations
+            sandbox
+                .mock(multiputUploadTest)
+                .expects('createUploadSessionSuccessHandler')
+                .withExactArgs(response.context_info.session);
+
+            // Execute
+            multiputUploadTest.xhr.post = sandbox.mock().rejects({
+                response
+            });
+
+            await multiputUploadTest.createUploadSession();
+        });
+
+        withData(
+            {
+                'file name conflict': [{ code: 'item_name_in_use', status: 409 }],
+                'storage limit exceeded': [{ code: 'storage_limit_exceeded', status: 403 }],
+                'insufficient permissions': [{ code: 'access_denied_insufficient_permissions', status: 403 }]
+            },
+            (response) => {
+                it('should invoke onError but not sessionErrorHandler on expected failure', async () => {
+                    // Setup
+
+                    multiputUploadTest.onError = sandbox.mock().withArgs(response);
+
+                    // Expectations
+                    sandbox.mock(multiputUploadTest).expects('sessionErrorHandler').never();
+
+                    // Execute
+                    multiputUploadTest.xhr.post = sandbox.mock().rejects({
+                        response
+                    });
+                    await multiputUploadTest.createUploadSession();
+                });
+            }
+        );
+
+        withData(
+            {
+                'maybeResponse null': [{ status: 403 }],
+                'no code': [{ status: 403, a: 1 }],
+                '403 with code that is not storage_limit_exceeded': [{ status: 403, code: 'foo' }],
+                '409 with code that is not item_name_in_use': [{ status: 409, code: 'foo' }]
+            },
+            (response) => {
+                it('should invoke sessionErrorHandler on other non-201 status code', async () => {
+                    // Expectations
+                    sandbox
+                        .mock(multiputUploadTest)
+                        .expects('sessionErrorHandler')
+                        .withExactArgs(response, 'create_session_misc_error', JSON.stringify(response));
+                    // Execute
+                    multiputUploadTest.xhr.post = sandbox.mock().rejects({
+                        response
+                    });
+                    await multiputUploadTest.createUploadSession();
+                });
+            }
+        );
+    });
+
+    describe('createUploadSessionErrorHandler()', () => {
+        it('should noop when isDestroyed', () => {
+            // Expectations
+            sandbox.mock(multiputUploadTest).expects('isDestroyed').returns(true);
+
+            sandbox.mock(multiputUploadTest).expects('createUploadSessionRetry').never();
+            sandbox.mock(multiputUploadTest).expects('sessionErrorHandler').never();
+
+            multiputUploadTest.createUploadSessionErrorHandler();
+        });
+
+        it('should retry if retries not exhausted', () => {
+            // Expectations
+            sandbox.mock(multiputUploadTest).expects('createUploadSessionRetry');
+            // Execute
+            multiputUploadTest.createUploadSessionErrorHandler();
+        });
+
+        it('should fail if retries exhausted', () => {
+            // Setup
+            multiputUploadTest.config.retries = 3;
+            multiputUploadTest.createSessionNumRetriesPerformed = 100;
+            const response = { test: 1 };
+            // Expectations
+            sandbox
+                .mock(multiputUploadTest)
+                .expects('sessionErrorHandler')
+                .withArgs(response, 'create_session_retries_exceeded', JSON.stringify(response));
+            // Execute
+            multiputUploadTest.createUploadSessionErrorHandler(response);
+        });
+    });
+
+    describe('createUploadSessionRetry()', () => {
+        it('should call createSession again after exponential backoff based on retry count', () => {
+            // Setup
+            multiputUploadTest.createSessionNumRetriesPerformed = 5;
+            const mock = sandbox.mock(multiputUploadTest);
+            // Expectations
+            MultiputUpload.__Rewire__(
+                'getBoundedExpBackoffRetryDelay',
+                sandbox
+                    .mock()
+                    .withExactArgs(
+                        multiputUploadTest.config.initialRetryDelayMs,
+                        multiputUploadTest.config.maxRetryDelayMs,
+                        multiputUploadTest.createSessionNumRetriesPerformed
+                    )
+                    .returns(10)
+            );
+
+            // Execute
+            multiputUploadTest.createUploadSessionRetry();
+            // Verify
+            setTimeout(() => {
+                mock.expects('createUploadSession');
+                assert.equal(
+                    multiputUploadTest.createSessionNumRetriesPerformed,
+                    6,
+                    'createSessionNumRetriesPerformed was not incremented'
+                );
+            }, 100);
+
+            MultiputUpload.__ResetDependency__('getBoundedExpBackoffRetryDelay');
+        });
+    });
+
+    describe('sessionErrorHandler()', () => {
+        it('should destroy, log and call error handler properly', async () => {
+            MultiputUpload.__Rewire__('retryNumOfTimes', sandbox.mock().resolves());
+            multiputUploadTest.destroy = sandbox.mock();
+            multiputUploadTest.onError = sandbox.mock();
+            multiputUploadTest.abortSession = sandbox.mock();
+
+            await multiputUploadTest.sessionErrorHandler(null, '123', '123');
+            MultiputUpload.__ResetDependency__('retryNumOfTimes');
+        });
+    });
+
+    describe('abortSession()', () => {
+        it('should terminate the worker and abort session', () => {
+            multiputUploadTest.worker = {
+                terminate: sandbox.mock()
+            };
+            multiputUploadTest.xhr.delete = sandbox.mock();
+            multiputUploadTest.sessionEndpoints.abort = 'foo';
+
+            multiputUploadTest.abortSession();
+        });
+    });
+
+    describe('partUploadSuccessHandler()', () => {
+        it('should update the part uploading progress and upload next parts', () => {
+            const part = {
+                uploadedBytes: 1,
+                size: 1
+            };
+            multiputUploadTest.numPartsUploading = 10;
+            multiputUploadTest.numPartsUploaded = 10;
+            multiputUploadTest.updateProgress = sandbox.mock().withArgs(part.uploadedBytes, part.size);
+            multiputUploadTest.processNextParts = sandbox.mock();
+
+            multiputUploadTest.partUploadSuccessHandler(part);
+        });
+    });
+
+    describe('updateProgress()', () => {
+        it('should call onProgress() properly', () => {
+            const prevUploadedBytes = 10;
+            const newUploadedBytes = 20;
+
+            multiputUploadTest.totalUploadedBytes = 100;
+            multiputUploadTest.onProgress = sandbox.mock().withArgs(110);
+
+            multiputUploadTest.updateProgress(prevUploadedBytes, newUploadedBytes);
+        });
+    });
+
+    describe('shouldComputeDigestForNextPart()', () => {
+        beforeEach(() => {
+            multiputUploadTest.config.digestReadahead = 2;
+        });
+
+        withData(
+            {
+                'ended is true': [false, true],
+                'a part is already computing': [false, false, 1],
+                'all parts started': [false, false, 0, 0],
+                'readahead is full': [false, false, 0, 1, 2],
+                'no part computing, there is a part not started, and readahead not full': [true, false, 0, 1, 1]
+            },
+            (expected, ended, numPartsDigestComputing, numPartsNotStarted, numPartsDigestReady) => {
+                it('should return correct value', () => {
+                    // Setup
+                    multiputUploadTest.ended = ended;
+                    multiputUploadTest.numPartsDigestComputing = numPartsDigestComputing;
+                    multiputUploadTest.numPartsNotStarted = numPartsNotStarted;
+                    multiputUploadTest.numPartsDigestReady = numPartsDigestReady;
+
+                    // Execute
+                    const result = multiputUploadTest.shouldComputeDigestForNextPart();
+
+                    // Verify
+                    assert.equal(result, expected);
+                });
+            }
+        );
+    });
+
+    describe('computeDigestForNextPart()', () => {
+        beforeEach(() => {
+            multiputUploadTest.firstUnuploadedPartIndex = 0;
+            multiputUploadTest.numPartsDigestComputing = 0;
+            multiputUploadTest.parts = [
+                new MultiputPart({}, 0, 0, 1024, 1, { upload_part: 'www.box.com' }),
+                new MultiputPart({}, 1, 1024, 1024, 1, { upload_part: 'www.box.com' }),
+                new MultiputPart({}, 2, 2048, 1024, 1, { upload_part: 'www.box.com' })
+            ];
+        });
+
+        it('should process first not started part by calling computeDigestForPart', () => {
+            multiputUploadTest.parts[0].state = PART_STATE_UPLOADED;
+            multiputUploadTest.parts[1].state = PART_STATE_COMPUTING_DIGEST;
+            multiputUploadTest.parts[2].state = PART_STATE_NOT_STARTED;
+            multiputUploadTest.numPartsNotStarted = 1;
+
+            // Expectations
+            sandbox
+                .mock(multiputUploadTest)
+                .expects('computeDigestForPart')
+                .once()
+                .withArgs(multiputUploadTest.parts[2]);
+
+            // Execute
+            multiputUploadTest.computeDigestForNextPart();
+
+            // Verify
+            assert.equal(multiputUploadTest.numPartsNotStarted, 0);
+            assert.equal(multiputUploadTest.numPartsDigestComputing, 1);
+        });
+
+        it('should process only one part', () => {
+            // Setup
+            multiputUploadTest.parts[0].state = PART_STATE_NOT_STARTED;
+            multiputUploadTest.parts[1].state = PART_STATE_NOT_STARTED;
+            multiputUploadTest.parts[2].state = PART_STATE_NOT_STARTED;
+            multiputUploadTest.numPartsNotStarted = 3;
+
+            // Expectations
+            sandbox
+                .mock(multiputUploadTest)
+                .expects('computeDigestForPart')
+                .once()
+                .withArgs(multiputUploadTest.parts[0]);
+
+            // Execute
+            multiputUploadTest.computeDigestForNextPart();
+
+            // Verify
+            assert.equal(multiputUploadTest.numPartsNotStarted, 2);
+            assert.equal(multiputUploadTest.numPartsDigestComputing, 1);
+        });
+    });
+
+    describe('computeDigestForPart()', () => {
+        it('should read, compute digest, then send part to worker', async () => {
+            MultiputUpload.__Rewire__('digest', sandbox.mock().resolves());
+            multiputUploadTest.sendPartToWorker = sandbox.mock();
+            multiputUploadTest.readFile = sandbox.mock().resolves({
+                buffer: new ArrayBuffer(),
+                readCompleteTimestamp: 123
+            });
+            multiputUploadTest.processNextParts = sandbox.mock();
+
+            await multiputUploadTest.computeDigestForPart({
+                offset: 1,
+                size: 2
+            });
+
+            MultiputUpload.__ResetDependency__('digest');
+        });
+    });
 
     describe('processNextParts()', () => {
diff --git a/src/flowTypes.js b/src/flowTypes.js
index 350212856a..5e0aef4369 100644
--- a/src/flowTypes.js
+++ b/src/flowTypes.js
@@ -243,7 +243,9 @@
     cache?: Cache,
     apiHost?: string,
     uploadHost?: string,
-    responseFilter?: Function
+    responseFilter?: Function,
+    consoleLog?: boolean,
+    consoleError?: boolean
 };
 
 export type Recent = {
@@ -281,7 +283,6 @@
 };
 
 export type SkillData = {
 };
 
 export type MultiputConfig = {
-    console: boolean,
     digestReadahead: number,
     initialRetryDelayMs: number,
     maxRetryDelayMs: number,
@@ -289,3 +290,14 @@
     requestTimeoutMs: number,
     retries: number
 };
+
+export type MultiputPart = {
+    offset: number,
+    part_id: string,
+    sha1: string,
+    size: number
+};
+
+export type MultiputData = {
+    part?: MultiputPart
+};
diff --git a/src/util/uploads.js b/src/util/uploads.js
index d863ef050c..768cf002fe 100644
--- a/src/util/uploads.js
+++ b/src/util/uploads.js
@@ -67,4 +67,17 @@ function tryParseJson(maybeJson: string): ?Object {
     }
 }
 
-export { toISOStringNoMS, getFileLastModifiedAsISONoMSIfPossible, tryParseJson };
+/**
+ * Get bounded exponential backoff retry delay
+ *
+ * @param {number} initialRetryDelay
+ * @param {number} maxRetryDelay
+ * @param {number} retryNum - Current retry number (first retry will have value of 0).
+ * @return {number}
+ */
+function getBoundedExpBackoffRetryDelay(initialRetryDelay: number, maxRetryDelay: number, retryNum: number) {
+    const delay = initialRetryDelay * 2 ** retryNum;
+    return delay > maxRetryDelay ? maxRetryDelay : delay;
+}
+
+export { toISOStringNoMS, getFileLastModifiedAsISONoMSIfPossible, tryParseJson, getBoundedExpBackoffRetryDelay };
diff --git a/src/util/url.js b/src/util/url.js
new file mode 100644
index 0000000000..91039d5ab3
--- /dev/null
+++ b/src/util/url.js
@@ -0,0 +1,41 @@
+/**
+ * @flow
+ * @file Utility functions for urls
+ * @author Box
+ */
+import Uri from 'jsuri';
+
+/**
+ * Update URL query parameters
+ *
+ * @param {string} url - the url that contains the potential query parameter string
+ * @param {Object} queryParams
+ * @return {string}
+ */
+function updateQueryParameters(url: string, queryParams: Object): string {
+    if (!queryParams) {
+        return url;
+    }
+
+    const uri = new Uri(url);
+
+    Object.keys(queryParams).forEach((key) => {
+        const value = queryParams[key];
+
+        if (!value) {
+            return;
+        }
+
+        if (uri.hasQueryParam(key)) {
+            uri.replaceQueryParam(key, value);
+            return;
+        }
+
+        uri.addQueryParam(key, value);
+    });
+
+    return uri.toString();
+}
+
+// eslint-disable-next-line import/prefer-default-export
+export { updateQueryParameters };
diff --git a/src/util/webcrypto.js b/src/util/webcrypto.js
index b1b2a559a5..f98afe8419 100644
--- a/src/util/webcrypto.js
+++ b/src/util/webcrypto.js
@@ -18,10 +18,10 @@ function getCrypto(): Object {
  * hash function and text given as parameters
  *
  * @param {string} algorithm
- * @param {Uint8Array} buffer
+ * @param {ArrayBuffer} buffer
  * @return {Promise} Promise - resolves with an ArrayBuffer containing the digest result
  */
-function digest(algorithm: string, buffer: Uint8Array): Promise {
+function digest(algorithm: string, buffer: ArrayBuffer): Promise {
     const cryptoRef = getCrypto();
 
     if (cryptoRef !== window.msCrypto) {
diff --git a/yarn.lock b/yarn.lock
index a2695cb08d..71ae7becbc 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -4524,6 +4524,10 @@ jsprim@^1.2.2:
     json-schema "0.2.3"
     verror "1.3.6"
 
+jsuri@^1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/jsuri/-/jsuri-1.3.1.tgz#cd93fc6a87b255142cb7b0f479f00517ab9395ed"
+
 jsx-ast-utils@^1.4.0, jsx-ast-utils@^1.4.1:
   version "1.4.1"
  resolved "https://registry.yarnpkg.com/jsx-ast-utils/-/jsx-ast-utils-1.4.1.tgz#3867213e8dd79bf1e8f2300c0cfc1efb182c0df1"
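
Retry timing sketch (illustrative, not part of the patch): with the DEFAULT_MULTIPUT_CONFIG values above (initialRetryDelayMs: 5000, maxRetryDelayMs: 60000), getBoundedExpBackoffRetryDelay from src/util/uploads.js produces the schedule below. The function is inlined here so the snippet runs standalone.

// Illustrative only -- mirrors getBoundedExpBackoffRetryDelay in src/util/uploads.js above.
function getBoundedExpBackoffRetryDelay(initialRetryDelay, maxRetryDelay, retryNum) {
    const delay = initialRetryDelay * 2 ** retryNum;
    return delay > maxRetryDelay ? maxRetryDelay : delay;
}

// retryNum:  0     1      2      3      4      5
// delay ms:  5000  10000  20000  40000  60000  60000  (capped at maxRetryDelayMs)
for (let retryNum = 0; retryNum <= 5; retryNum += 1) {
    console.log(retryNum, getBoundedExpBackoffRetryDelay(5000, 60000, retryNum));
}

So a part that keeps failing waits 5s, 10s, 20s, 40s, then 60s between attempts, and with retries: 5 the worst case makes retries + 1 = 6 attempts in total, matching the comment in DEFAULT_MULTIPUT_CONFIG.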
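Usage sketch for updateQueryParameters in src/util/url.js (the URLs here are placeholders, not real endpoints): jsuri's replaceQueryParam updates an existing key in place while addQueryParam appends a new one, and the !value guard skips falsy values, so an offset of 0 is silently dropped.

import { updateQueryParameters } from '../util/url';

// Existing keys are replaced in place; new keys are appended.
updateQueryParameters('https://upload.example.com/parts?limit=8', { offset: 2, limit: 4 });
// => 'https://upload.example.com/parts?limit=4&offset=2'

// A falsy value (0, null, '') is skipped by the !value guard.
updateQueryParameters('https://upload.example.com/parts', { offset: 0, limit: 4 });
// => 'https://upload.example.com/parts?limit=4'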
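A hypothetical wiring of MultiputUpload as declared in this diff: the session URL and callbacks are placeholders, auth-related options are omitted, and createUploadSession() is the entry point implemented above (commit is still TODO in this changeset).

// Hypothetical caller sketch -- argument order follows the constructor in src/api/uploads/MultiputUpload.js.
const uploader = new MultiputUpload(
    { consoleLog: true }, // Options; consoleLog enables the debug logging added to Base.js
    file, // a File, e.g. from an <input type="file">
    'https://upload.example.com/api/2.0/files/upload_sessions', // createSessionUrl (placeholder)
    '0', // destinationFolderId (untyped, no "d_" prefix)
    null, // destinationFileId; must not be set together with destinationFolderId
    undefined, // config falls back to DEFAULT_MULTIPUT_CONFIG
    (error) => console.error('upload failed', error), // onError
    (uploadedBytes) => console.log(`${uploadedBytes} bytes uploaded`), // onProgress
    () => console.log('done') // onSuccess
);
uploader.createUploadSession();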