diff --git a/samples/detect.js b/samples/detect.js index 2507507f..5b5ca801 100644 --- a/samples/detect.js +++ b/samples/detect.js @@ -29,7 +29,7 @@ function detectFaces(fileName) { // const fileName = 'Local image file, e.g. /path/to/image.png'; client - .faceDetection({image: {source: {filename: fileName}}}) + .faceDetection(fileName) .then(results => { const faces = results[0].faceAnnotations; @@ -62,17 +62,9 @@ function detectFacesGCS(bucketName, fileName) { // const bucketName = 'Bucket where the file resides, e.g. my-bucket'; // const fileName = 'Path to file within bucket, e.g. path/to/image.png'; - const request = { - image: { - source: { - imageUri: `gs://${bucketName}/${fileName}`, - }, - }, - }; - // Performs face detection on the gcs file client - .faceDetection(request) + .faceDetection(`gs://${bucketName}/${fileName}`) .then(results => { const faces = results[0].faceAnnotations; @@ -106,7 +98,7 @@ function detectLabels(fileName) { // Performs label detection on the local file client - .labelDetection({image: {source: {filename: fileName}}}) + .labelDetection(fileName) .then(results => { const labels = results[0].labelAnnotations; console.log('Labels:'); @@ -132,17 +124,9 @@ function detectLabelsGCS(bucketName, fileName) { // const bucketName = 'Bucket where the file resides, e.g. my-bucket'; // const fileName = 'Path to file within bucket, e.g. 
path/to/image.png'; - const request = { - image: { - source: { - imageUri: `gs://${bucketName}/${fileName}`, - }, - }, - }; - // Performs label detection on the gcs file client - .labelDetection(request) + .labelDetection(`gs://${bucketName}/${fileName}`) .then(results => { const labels = results[0].labelAnnotations; console.log('Labels:'); @@ -168,7 +152,7 @@ function detectLandmarks(fileName) { // Performs landmark detection on the local file client - .landmarkDetection({image: {source: {filename: fileName}}}) + .landmarkDetection(fileName) .then(results => { const landmarks = results[0].landmarkAnnotations; console.log('Landmarks:'); @@ -194,17 +178,9 @@ function detectLandmarksGCS(bucketName, fileName) { // const bucketName = 'Bucket where the file resides, e.g. my-bucket'; // const fileName = 'Path to file within bucket, e.g. path/to/image.png'; - const request = { - image: { - source: { - imageUri: `gs://${bucketName}/${fileName}`, - }, - }, - }; - // Performs landmark detection on the gcs file client - .landmarkDetection(request) + .landmarkDetection(`gs://${bucketName}/${fileName}`) .then(results => { const landmarks = results[0].landmarkAnnotations; console.log('Landmarks:'); @@ -230,7 +206,7 @@ function detectText(fileName) { // Performs text detection on the local file client - .textDetection({image: {source: {filename: fileName}}}) + .textDetection(fileName) .then(results => { const detections = results[0].textAnnotations; console.log('Text:'); @@ -256,17 +232,9 @@ function detectTextGCS(bucketName, fileName) { // const bucketName = 'Bucket where the file resides, e.g. my-bucket'; // const fileName = 'Path to file within bucket, e.g. 
path/to/image.png'; - const request = { - image: { - source: { - imageUri: `gs://${bucketName}/${fileName}`, - }, - }, - }; - // Performs text detection on the gcs file client - .textDetection(request) + .textDetection(`gs://${bucketName}/${fileName}`) .then(results => { const detections = results[0].textAnnotations; console.log('Text:'); @@ -292,7 +260,7 @@ function detectLogos(fileName) { // Performs logo detection on the local file client - .logoDetection({image: {source: {filename: fileName}}}) + .logoDetection(fileName) .then(results => { const logos = results[0].logoAnnotations; console.log('Logos:'); @@ -318,17 +286,9 @@ function detectLogosGCS(bucketName, fileName) { // const bucketName = 'Bucket where the file resides, e.g. my-bucket'; // const fileName = 'Path to file within bucket, e.g. path/to/image.png'; - const request = { - image: { - source: { - imageUri: `gs://${bucketName}/${fileName}`, - }, - }, - }; - // Performs logo detection on the gcs file client - .logoDetection(request) + .logoDetection(`gs://${bucketName}/${fileName}`) .then(results => { const logos = results[0].logoAnnotations; console.log('Logos:'); @@ -354,7 +314,7 @@ function detectProperties(fileName) { // Performs property detection on the local file client - .imageProperties({image: {source: {filename: fileName}}}) + .imageProperties(fileName) .then(results => { const properties = results[0].imagePropertiesAnnotation; const colors = properties.dominantColors.colors; @@ -380,17 +340,9 @@ function detectPropertiesGCS(bucketName, fileName) { // const bucketName = 'Bucket where the file resides, e.g. my-bucket'; // const fileName = 'Path to file within bucket, e.g. 
path/to/image.png'; - const request = { - image: { - source: { - imageUri: `gs://${bucketName}/${fileName}`, - }, - }, - }; - // Performs property detection on the gcs file client - .imageProperties(request) + .imageProperties(`gs://${bucketName}/${fileName}`) .then(results => { const properties = results[0].imagePropertiesAnnotation; const colors = properties.dominantColors.colors; @@ -416,7 +368,7 @@ function detectSafeSearch(fileName) { // Performs safe search detection on the local file client - .safeSearchDetection({image: {source: {filename: fileName}}}) + .safeSearchDetection(fileName) .then(results => { const detections = results[0].safeSearchAnnotation; @@ -445,17 +397,9 @@ function detectSafeSearchGCS(bucketName, fileName) { // const bucketName = 'Bucket where the file resides, e.g. my-bucket'; // const fileName = 'Path to file within bucket, e.g. path/to/image.png'; - const request = { - image: { - source: { - imageUri: `gs://${bucketName}/${fileName}`, - }, - }, - }; - // Performs safe search property detection on the remote file client - .safeSearchDetection(request) + .safeSearchDetection(`gs://${bucketName}/${fileName}`) .then(results => { const detections = results[0].safeSearchAnnotation; @@ -486,7 +430,7 @@ function detectCropHints(fileName) { // Find crop hints for the local file client - .cropHints({image: {source: {filename: fileName}}}) + .cropHints(fileName) .then(results => { const cropHints = results[0].cropHintsAnnotation; @@ -518,17 +462,9 @@ function detectCropHintsGCS(bucketName, fileName) { // const bucketName = 'Bucket where the file resides, e.g. my-bucket'; // const fileName = 'Path to file within bucket, e.g. 
path/to/image.png'; - const request = { - image: { - source: { - imageUri: `gs://${bucketName}/${fileName}`, - }, - }, - }; - // Find crop hints for the remote file client - .cropHints(request) + .cropHints(`gs://${bucketName}/${fileName}`) .then(results => { const cropHints = results[0].cropHintsAnnotation; @@ -561,7 +497,7 @@ function detectWeb(fileName) { // Detect similar images on the web to a local file client - .webDetection({image: {source: {filename: fileName}}}) + .webDetection(fileName) .then(results => { const webDetection = results[0].webDetection; @@ -614,17 +550,9 @@ function detectWebGCS(bucketName, fileName) { // const bucketName = 'Bucket where the file resides, e.g. my-bucket'; // const fileName = 'Path to file within bucket, e.g. path/to/image.png'; - const request = { - image: { - source: { - imageUri: `gs://${bucketName}/${fileName}`, - }, - }, - }; - // Detect similar images on the web to a remote file client - .webDetection(request) + .webDetection(`gs://${bucketName}/${fileName}`) .then(results => { const webDetection = results[0].webDetection; @@ -678,7 +606,7 @@ function detectFulltext(fileName) { // Read a local image as a text document client - .documentTextDetection({image: {source: {filename: fileName}}}) + .documentTextDetection(fileName) .then(results => { const fullTextAnnotation = results[0].fullTextAnnotation; console.log(fullTextAnnotation.text); @@ -704,17 +632,9 @@ function detectFulltextGCS(bucketName, fileName) { // const bucketName = 'Bucket where the file resides, e.g. my-bucket'; // const fileName = 'Path to file within bucket, e.g. 
path/to/image.png'; - const request = { - image: { - source: { - imageUri: `gs://${bucketName}/${fileName}`, - }, - }, - }; - // Read a remote image as a text document client - .documentTextDetection(request) + .documentTextDetection(`gs://${bucketName}/${fileName}`) .then(results => { const fullTextAnnotation = results[0].fullTextAnnotation; console.log(fullTextAnnotation.text); diff --git a/samples/quickstart.js b/samples/quickstart.js index 79e265f0..9afcff3d 100644 --- a/samples/quickstart.js +++ b/samples/quickstart.js @@ -22,21 +22,9 @@ const vision = require('@google-cloud/vision'); // Creates a client const client = new vision.ImageAnnotatorClient(); -// The name of the image file to annotate -const fileName = './resources/wakeupcat.jpg'; - -// Prepare the request object -const request = { - image: { - source: { - filename: fileName, - }, - }, -}; - // Performs label detection on the image file client - .labelDetection(request) + .labelDetection('./resources/wakeupcat.jpg') .then(results => { const labels = results[0].labelAnnotations; diff --git a/src/helpers.js b/src/helpers.js index 4af31eb8..fbf37374 100644 --- a/src/helpers.js +++ b/src/helpers.js @@ -24,37 +24,71 @@ const promisify = require('@google-cloud/common').util.promisify; const gax = require('google-gax'); /*! - * Find a given image and fire a callback with the appropriate image structure. + * Convert non-object request forms into a correctly-formatted object. * - * @param {object} image An object representing what is known about the - * image. + * @param {object|string|Buffer} request An object representing an + * AnnotateImageRequest. May also be a string representing the path + * (filename or URL) to the image, or a buffer representing the image itself. + * + * @returns An object representing an AnnotateImageRequest. + */ +let _requestToObject = request => { + if (is.string(request)) { + // Is this a URL or a local file? 
+ // Guess based on what the string looks like, and build the full + // request object in the correct format. + if (request.indexOf('://') === -1 || request.indexOf('file://') === 0) { + request = {image: {source: {filename: request}}}; + } else { + request = {image: {source: {imageUri: request}}}; + } + } else if (Buffer.isBuffer(request)) { + // Drop the buffer one level lower; it will get dealt with later + // in the function. This allows sending <Buffer> and {image: <Buffer>} to + // both work identically. + request = {image: request}; + } + return request; +}; + +/*! + * Coerce several nicer iterations of "how to specify an image" to the + * full structure expected by the Vision API. + * + * @param {object} request An object representing an AnnotateImageRequest. + * It may include `image.source.filename` or a buffer passed to + * `image.content`, which are coerced into their canonical forms by this + * function. * @param {function} callback The callback to run. */ -var coerceImage = (image, callback) => { +let _coerceRequest = (request, callback) => { + // At this point, request must be an object with an `image` key; if not, + // it is an error. If there is no image, throw an exception. + if (!is.object(request) || is.undefined(request.image)) { + return callback(new Error('No image present.')); + } + + // If this is a buffer, read it and send the object // that the Vision API expects. - if (Buffer.isBuffer(image)) { - callback(null, { - content: image.toString('base64'), - }); - return; + if (Buffer.isBuffer(request.image)) { + request.image = {content: request.image.toString('base64')}; } - // File exists on disk. - if (image.source && image.source.filename) { - fs.readFile(image.source.filename, {encoding: 'base64'}, (err, blob) => { + // If the file is specified as a filename and exists on disk, read it + // and coerce it into the base64 content. 
+ if (request.image.source && request.image.source.filename) { + fs.readFile(request.image.source.filename, (err, blob) => { if (err) { callback(err); return; } - callback(null, {content: blob.toString('base64')}); + request.image.content = blob.toString('base64'); + delete request.image.source; + return callback(null, request); }); - return; + } else { + return callback(null, request); } - - // No other options were relevant; return the image with no modification. - callback(null, image); - return; }; /*! @@ -68,12 +102,29 @@ var coerceImage = (image, callback) => { * asking for the single feature annotation. */ var _createSingleFeatureMethod = featureValue => { - return function(annotateImageRequest, callOptions) { + return function(annotateImageRequest, callOptions, callback) { + // Sanity check: If we got a string or buffer, we need this to be + // in object form now, so we can tack on the features list. + // + // Do the minimum required conversion, which can also be guaranteed to + // be synchronous (e.g. no file loading yet; that is handled by + // annotateImage later. + annotateImageRequest = _requestToObject(annotateImageRequest); + + // If a callback was provided and options were skipped, normalize + // the argument names. + if (is.undefined(callback) && is.function(callOptions)) { + callback = callOptions; + callOptions = undefined; + } + + // Add the feature to the request. annotateImageRequest.features = annotateImageRequest.features || [ { type: featureValue, }, ]; + // If the user submitted explicit features that do not line up with // the precise method called, throw an exception. for (let feature of annotateImageRequest.features) { @@ -84,8 +135,9 @@ var _createSingleFeatureMethod = featureValue => { ); } } + // Call the underlying #annotateImage method. 
- return this.annotateImage(annotateImageRequest, callOptions); + return this.annotateImage(annotateImageRequest, callOptions, callback); }; }; @@ -108,8 +160,11 @@ module.exports = apiVersion => { * @see google.cloud.vision.v1.AnnotateImageRequest * * @method v1.ImageAnnotatorClient#annotateImage - * @param {object} request A representation of the request being sent to the - * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object|string|Buffer} request A representation of the request + * being sent to the Vision API. This is an + * {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * For simple cases, you may also send a string (the URL or filename of + * the image) or a buffer (the image itself). * @param {object} request.image A dictionary-like object representing the * image. This should have a single key (`source`, `content`). * @@ -163,21 +218,15 @@ module.exports = apiVersion => { callOptions = undefined; } - // If there is no image, throw an exception. - if (is.undefined(request.image)) { - throw new Error('Attempted to call `annotateImage` with no image.'); - } - // If we got a filename for the image, open the file and transform // it to content. - return coerceImage(request.image, (err, image) => { + return _coerceRequest(request, (err, req) => { if (err) { return callback(err); } - request.image = image; // Call the GAPIC batch annotation function. - let requests = {requests: [request]}; + let requests = {requests: [req]}; return this.batchAnnotateImages(requests, callOptions, (err, r) => { // If there is an error, handle it. if (err) { @@ -212,8 +261,11 @@ module.exports = apiVersion => { * @see google.cloud.vision.v1.AnnotateImageRequest * * @method v1.ImageAnnotatorClient#faceDetection - * @param {object} request A representation of the request being sent to the - * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. 
+ * @param {object|string|Buffer} request A representation of the request + * being sent to the Vision API. This is an + * {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * For simple cases, you may also send a string (the URL or filename of + * the image) or a buffer (the image itself). * @param {object} request.image A dictionary-like object representing the * image. This should have a single key (`source`, `content`). * @@ -266,8 +318,11 @@ module.exports = apiVersion => { * @see google.cloud.vision.v1.AnnotateImageRequest * * @method v1.ImageAnnotatorClient#landmarkDetection - * @param {object} request A representation of the request being sent to the - * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object|string|Buffer} request A representation of the request + * being sent to the Vision API. This is an + * {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * For simple cases, you may also send a string (the URL or filename of + * the image) or a buffer (the image itself). * @param {object} request.image A dictionary-like object representing the * image. This should have a single key (`source`, `content`). * @@ -320,8 +375,11 @@ module.exports = apiVersion => { * @see google.cloud.vision.v1.AnnotateImageRequest * * @method v1.ImageAnnotatorClient#logoDetection - * @param {object} request A representation of the request being sent to the - * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object|string|Buffer} request A representation of the request + * being sent to the Vision API. This is an + * {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * For simple cases, you may also send a string (the URL or filename of + * the image) or a buffer (the image itself). * @param {object} request.image A dictionary-like object representing the * image. 
This should have a single key (`source`, `content`). * @@ -365,8 +423,11 @@ module.exports = apiVersion => { * @see google.cloud.vision.v1.AnnotateImageRequest * * @method v1.ImageAnnotatorClient#labelDetection - * @param {object} request A representation of the request being sent to the - * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object|string|Buffer} request A representation of the request + * being sent to the Vision API. This is an + * {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * For simple cases, you may also send a string (the URL or filename of + * the image) or a buffer (the image itself). * @param {object} request.image A dictionary-like object representing the * image. This should have a single key (`source`, `content`). * @@ -410,8 +471,11 @@ module.exports = apiVersion => { * @see google.cloud.vision.v1.AnnotateImageRequest * * @method v1.ImageAnnotatorClient#textDetection - * @param {object} request A representation of the request being sent to the - * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object|string|Buffer} request A representation of the request + * being sent to the Vision API. This is an + * {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * For simple cases, you may also send a string (the URL or filename of + * the image) or a buffer (the image itself). * @param {object} request.image A dictionary-like object representing the * image. This should have a single key (`source`, `content`). * @@ -455,8 +519,11 @@ module.exports = apiVersion => { * @see google.cloud.vision.v1.AnnotateImageRequest * * @method v1.ImageAnnotatorClient#documentTextDetection - * @param {object} request A representation of the request being sent to the - * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. 
+ * @param {object|string|Buffer} request A representation of the request + * being sent to the Vision API. This is an + * {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * For simple cases, you may also send a string (the URL or filename of + * the image) or a buffer (the image itself). * @param {object} request.image A dictionary-like object representing the * image. This should have a single key (`source`, `content`). * @@ -500,8 +567,11 @@ module.exports = apiVersion => { * @see google.cloud.vision.v1.AnnotateImageRequest * * @method v1.ImageAnnotatorClient#safeSearchDetection - * @param {object} request A representation of the request being sent to the - * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object|string|Buffer} request A representation of the request + * being sent to the Vision API. This is an + * {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * For simple cases, you may also send a string (the URL or filename of + * the image) or a buffer (the image itself). * @param {object} request.image A dictionary-like object representing the * image. This should have a single key (`source`, `content`). * @@ -545,8 +615,11 @@ module.exports = apiVersion => { * @see google.cloud.vision.v1.AnnotateImageRequest * * @method v1.ImageAnnotatorClient#imageProperties - * @param {object} request A representation of the request being sent to the - * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object|string|Buffer} request A representation of the request + * being sent to the Vision API. This is an + * {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * For simple cases, you may also send a string (the URL or filename of + * the image) or a buffer (the image itself). * @param {object} request.image A dictionary-like object representing the * image. 
This should have a single key (`source`, `content`). * @@ -590,8 +663,11 @@ module.exports = apiVersion => { * @see google.cloud.vision.v1.AnnotateImageRequest * * @method v1.ImageAnnotatorClient#cropHints - * @param {object} request A representation of the request being sent to the - * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object|string|Buffer} request A representation of the request + * being sent to the Vision API. This is an + * {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * For simple cases, you may also send a string (the URL or filename of + * the image) or a buffer (the image itself). * @param {object} request.image A dictionary-like object representing the * image. This should have a single key (`source`, `content`). * @@ -635,8 +711,11 @@ module.exports = apiVersion => { * @see google.cloud.vision.v1.AnnotateImageRequest * * @method v1.ImageAnnotatorClient#webDetection - * @param {object} request A representation of the request being sent to the - * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object|string|Buffer} request A representation of the request + * being sent to the Vision API. This is an + * {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * For simple cases, you may also send a string (the URL or filename of + * the image) or a buffer (the image itself). * @param {object} request.image A dictionary-like object representing the * image. This should have a single key (`source`, `content`). 
* diff --git a/system-test/vision.js b/system-test/vision.js index ab12d91c..2f7e84b6 100644 --- a/system-test/vision.js +++ b/system-test/vision.js @@ -81,43 +81,25 @@ describe('Vision', function() { it('should detect from a URL', () => { var url = 'https://upload.wikimedia.org/wikipedia/commons/5/51/Google.png'; - return client - .logoDetection({ - image: { - source: {imageUri: url}, - }, - }) - .then(responses => { - var response = responses[0]; - assert.deepEqual(response.logoAnnotations[0].description, 'Google'); - }); + return client.logoDetection(url).then(responses => { + var response = responses[0]; + assert.deepEqual(response.logoAnnotations[0].description, 'Google'); + }); }); it('should detect from a filename', () => { - return client - .logoDetection({ - image: { - source: {filename: IMAGES.logo}, - }, - }) - .then(responses => { - var response = responses[0]; - assert.deepEqual(response.logoAnnotations[0].description, 'Google'); - }); + return client.logoDetection(IMAGES.logo).then(responses => { + var response = responses[0]; + assert.deepEqual(response.logoAnnotations[0].description, 'Google'); + }); }); it('should detect from a Buffer', () => { var buffer = fs.readFileSync(IMAGES.logo); - return client - .logoDetection({ - image: { - content: buffer, - }, - }) - .then(responses => { - var response = responses[0]; - assert.deepEqual(response.logoAnnotations[0].description, 'Google'); - }); + return client.logoDetection(buffer).then(responses => { + var response = responses[0]; + assert.deepEqual(response.logoAnnotations[0].description, 'Google'); + }); }); describe('single image', () => { diff --git a/test/helpers.test.js b/test/helpers.test.js index 26c13bc4..0dada9c1 100644 --- a/test/helpers.test.js +++ b/test/helpers.test.js @@ -69,7 +69,7 @@ describe('Vision helper methods', () => { }); }); - it('understands buffers', () => { + it('understands buffers in a request object', () => { let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); // 
Stub out the batch annotation method. @@ -115,7 +115,7 @@ describe('Vision helper methods', () => { let readFile = sandbox.stub(fs, 'readFile'); readFile .withArgs('image.jpg') - .callsArgWith(2, null, Buffer.from('fakeImage')); + .callsArgWith(1, null, Buffer.from('fakeImage')); readFile.callThrough(); // Stub out the batch annotation method as before. @@ -164,7 +164,7 @@ describe('Vision helper methods', () => { // Stub out `fs.readFile` and return a bogus image object. // This allows us to test filename detection. let readFile = sandbox.stub(fs, 'readFile'); - readFile.withArgs('image.jpg').callsArgWith(2, {error: 404}); + readFile.withArgs('image.jpg').callsArgWith(1, {error: 404}); readFile.callThrough(); // Ensure that the annotateImage method arrifies the request and @@ -274,14 +274,13 @@ describe('Vision helper methods', () => { .annotateImage(request) .then(assert.fail) .catch(err => { - let expected = 'Attempted to call `annotateImage` with no image.'; - assert(err.message === expected); + assert(err.message === 'No image present.'); }); }); }); describe('single-feature methods', () => { - it('calls annotateImage with the correct feature', () => { + it('call `annotateImage` with the correct feature', () => { let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); let annotate = sandbox.spy(client, 'annotateImage'); let batchAnnotate = sandbox.stub(client, 'batchAnnotateImages'); @@ -322,7 +321,152 @@ describe('Vision helper methods', () => { }); }); - it('throws an exception if conflicting features are given', () => { + it('accept a URL as a string', () => { + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); + + // Stub out the batch annotation method. + let batchAnnotate = sandbox.stub(client, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, { + responses: [ + { + logoAnnotations: [{description: 'Google'}], + }, + ], + }); + + // Call a request to a single-feature method using a URL. 
+ return client.logoDetection('https://goo.gl/logo.png').then(r => { + let response = r[0]; + + // Ensure we got the slice of the response that we expected. + assert.deepEqual(response, { + logoAnnotations: [{description: 'Google'}], + }); + + // Inspect the calls to batchAnnotateImages and ensure they matched + // the expected signature. + assert(batchAnnotate.callCount === 1); + assert( + batchAnnotate.calledWith({ + requests: [ + { + image: {source: {imageUri: 'https://goo.gl/logo.png'}}, + features: [{type: 3}], + }, + ], + }) + ); + }); + }); + + it('accept a filename as a string', () => { + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); + + // Stub out the batch annotation method. + let annotate = sandbox.stub(client, 'annotateImage'); + annotate.callsArgWith(2, undefined, { + logoAnnotations: [{description: 'Google'}], + }); + + // Call a request to a single-feature method using a URL. + return client.logoDetection('/path/to/logo.png').then(response => { + // Ensure we got the slice of the response that we expected. + assert.deepEqual(response, [ + { + logoAnnotations: [{description: 'Google'}], + }, + ]); + + // Inspect the calls to annotateImages and ensure they matched + // the expected signature. + assert(annotate.callCount === 1); + assert( + annotate.calledWith({ + image: {source: {filename: '/path/to/logo.png'}}, + features: [{type: 3}], + }) + ); + }); + }); + + it('understand a buffer sent directly', () => { + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); + + // Stub out the batch annotation method. + let batchAnnotate = sandbox.stub(client, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, { + responses: [ + { + logoAnnotations: [{description: 'Google'}], + }, + ], + }); + + // Ensure that the annotateImage method arrifies the request and + // passes it through to the batch annotation method. 
+ return client.logoDetection(Buffer.from('fakeImage')).then(r => { + let response = r[0]; + + // Ensure that we got the slice of the response that we expected. + assert.deepEqual(response, { + logoAnnotations: [{description: 'Google'}], + }); + + // Inspect the calls to batchAnnotateImages and ensure they matched + // the expected signature. + assert(batchAnnotate.callCount === 1); + assert( + batchAnnotate.calledWith({ + requests: [ + { + image: {content: 'ZmFrZUltYWdl'}, + features: [{type: 3}], + }, + ], + }) + ); + }); + }); + + it('handle being sent call options', () => { + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); + let opts = {foo: 'bar'}; + + // Stub out the batchAnnotateImages method as usual. + let batchAnnotate = sandbox.stub(client, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, { + responses: [ + { + logoAnnotations: [{description: 'Google'}], + }, + ], + }); + + // Perform the request. Send `opts` as an explicit second argument + // to ensure that sending call options works appropriately. + return client.logoDetection(Buffer.from('fakeImage'), opts).then(r => { + let response = r[0]; + assert.deepEqual(response, { + logoAnnotations: [{description: 'Google'}], + }); + + // Inspect the calls to batchAnnotateImages and ensure they matched + // the expected signature. + assert(batchAnnotate.callCount === 1); + assert( + batchAnnotate.calledWith({ + requests: [ + { + image: {content: 'ZmFrZUltYWdl'}, + features: [{type: 3}], + }, + ], + }) + ); + }); + }); + + it('throw an exception if conflicting features are given', () => { let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); let imageRequest = { image: {content: Buffer.from('bogus==')},