diff --git a/.githooks/pre-commit b/.githooks/pre-commit
index efcd7f499c..ae98e89e1a 100755
--- a/.githooks/pre-commit
+++ b/.githooks/pre-commit
@@ -132,6 +132,21 @@ then
   fi
 fi
 
+# Check integration tests.
+if [ $NUM_JAVA_FILES_CHANGED -gt 0 ] \
+  || [ $NUM_INTEGRATION_GOLDEN_FILES_CHANGED -gt 0 ] \
+  || [ $NUM_INTEGRATION_BAZEL_FILES_CHANGED -gt 0 ]
+then
+  echo_status "Checking integration tests..."
+  bazel --batch test --disk_cache="$BAZEL_CACHE_DIR" //test/integration/...
+  TEST_STATUS=$?
+  if [ $TEST_STATUS != 0 ]
+  then
+    echo_error "Tests failed." "Please fix them and try again."
+    exit 1
+  fi
+fi
+
 # Check and fix Bazel format.
 if [ $NUM_BAZEL_FILES_CHANGED -gt 0 ]
 then
diff --git a/src/main/java/com/google/api/generator/gapic/composer/ClientLibraryPackageInfoComposer.java b/src/main/java/com/google/api/generator/gapic/composer/ClientLibraryPackageInfoComposer.java
index a26255cd49..c4332baeb8 100644
--- a/src/main/java/com/google/api/generator/gapic/composer/ClientLibraryPackageInfoComposer.java
+++ b/src/main/java/com/google/api/generator/gapic/composer/ClientLibraryPackageInfoComposer.java
@@ -25,6 +25,9 @@
 import com.google.api.generator.gapic.model.Service;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import javax.annotation.Generated;
 
 public class ClientLibraryPackageInfoComposer {
@@ -73,16 +76,34 @@ private static CommentStatement createPackageInfoJavadoc(GapicContext context) {
       javaDocCommentBuilder.addParagraph(
           String.format("%s %s %s", DIVIDER, javaClientName, DIVIDER));
 
-      // TODO(miraleung): Paragraphs
+      // TODO(miraleung): Replace this with a comment converter when we support CommonMark.
       if (service.hasDescription()) {
-        String[] descriptionParagraphs = service.description().split("\\r?\\n");
+        String[] descriptionParagraphs = service.description().split("\\n\\n");
         for (int i = 0; i < descriptionParagraphs.length; i++) {
-          if (i == 0) {
+          boolean startsWithItemizedList = descriptionParagraphs[i].startsWith(" * ");
+          // Split by listed items, then join newlines.
+          List<String> listItems =
+              Stream.of(descriptionParagraphs[i].split("\\n \\*"))
+                  .map(s -> s.replace("\n", ""))
+                  .collect(Collectors.toList());
+          if (startsWithItemizedList) {
+            // Remove the first asterisk.
+            listItems.set(0, listItems.get(0).substring(2));
+          }
+
+          if (!startsWithItemizedList) {
+            if (i == 0) {
+              javaDocCommentBuilder =
+                  javaDocCommentBuilder.addParagraph(
+                      String.format(SERVICE_DESCRIPTION_HEADER_PATTERN, listItems.get(0)));
+            } else {
+              javaDocCommentBuilder = javaDocCommentBuilder.addParagraph(listItems.get(0));
+            }
+          }
+          if (listItems.size() > 1 || startsWithItemizedList) {
             javaDocCommentBuilder =
-                javaDocCommentBuilder.addParagraph(
-                    String.format(SERVICE_DESCRIPTION_HEADER_PATTERN, descriptionParagraphs[i]));
-          } else {
-            javaDocCommentBuilder = javaDocCommentBuilder.addParagraph(descriptionParagraphs[i]);
+                javaDocCommentBuilder.addUnorderedList(
+                    listItems.subList(startsWithItemizedList ? 0 : 1, listItems.size()));
           }
         }
       }
diff --git a/src/main/java/com/google/api/generator/gapic/composer/ServiceClientCommentComposer.java b/src/main/java/com/google/api/generator/gapic/composer/ServiceClientCommentComposer.java
index 49767dfaf3..fca8c84ee9 100644
--- a/src/main/java/com/google/api/generator/gapic/composer/ServiceClientCommentComposer.java
+++ b/src/main/java/com/google/api/generator/gapic/composer/ServiceClientCommentComposer.java
@@ -21,9 +21,12 @@
 import com.google.api.generator.gapic.model.MethodArgument;
 import com.google.api.generator.gapic.model.Service;
 import com.google.api.generator.gapic.utils.JavaStyle;
+import com.google.common.base.Strings;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 class ServiceClientCommentComposer {
   // Tokens.
@@ -102,8 +105,11 @@ class ServiceClientCommentComposer {
   static List<CommentStatement> createClassHeaderComments(Service service) {
     JavaDocComment.Builder classHeaderJavadocBuilder = JavaDocComment.builder();
     if (service.hasDescription()) {
-      classHeaderJavadocBuilder.addComment(
-          String.format(SERVICE_DESCRIPTION_SUMMARY_PATTERN, service.description()));
+      classHeaderJavadocBuilder =
+          processProtobufComment(
+              service.description(),
+              classHeaderJavadocBuilder,
+              SERVICE_DESCRIPTION_SUMMARY_PATTERN);
     }
 
     // Service introduction.
@@ -146,7 +152,8 @@ static List<CommentStatement> createRpcMethodHeaderComment(
     JavaDocComment.Builder methodJavadocBuilder = JavaDocComment.builder();
 
     if (method.hasDescription()) {
-      methodJavadocBuilder.addComment(method.description());
+      methodJavadocBuilder =
+          processProtobufComment(method.description(), methodJavadocBuilder, null);
     }
 
     methodJavadocBuilder.addParagraph(METHOD_DESCRIPTION_SAMPLE_CODE_SUMMARY_STRING);
@@ -157,8 +164,11 @@ static List<CommentStatement> createRpcMethodHeaderComment(
           "request", "The request object containing all of the parameters for the API call.");
     } else {
       for (MethodArgument argument : methodArguments) {
+        // TODO(miraleung): Remove the newline replacement when we support CommonMark.
         String description =
-            argument.field().hasDescription() ? argument.field().description() : EMPTY_STRING;
+            argument.field().hasDescription()
+                ? argument.field().description().replace("\n", "")
+                : EMPTY_STRING;
         methodJavadocBuilder.addParam(argument.name(), description);
       }
     }
@@ -190,7 +200,8 @@ static List<CommentStatement> createRpcCallableMethodHeaderComment(Method method
     JavaDocComment.Builder methodJavadocBuilder = JavaDocComment.builder();
 
     if (method.hasDescription()) {
-      methodJavadocBuilder.addComment(method.description());
+      methodJavadocBuilder =
+          processProtobufComment(method.description(), methodJavadocBuilder, null);
     }
 
     methodJavadocBuilder.addParagraph(METHOD_DESCRIPTION_SAMPLE_CODE_SUMMARY_STRING);
@@ -204,4 +215,42 @@ static List<CommentStatement> createRpcCallableMethodHeaderComment(Method method
   private static CommentStatement toSimpleComment(String comment) {
     return CommentStatement.withComment(JavaDocComment.withComment(comment));
   }
+
+  // TODO(miraleung): Replace this with a comment converter when we support CommonMark.
+  private static JavaDocComment.Builder processProtobufComment(
+      String rawComment, JavaDocComment.Builder originalCommentBuilder, String firstPattern) {
+    JavaDocComment.Builder commentBuilder = originalCommentBuilder;
+    String[] descriptionParagraphs = rawComment.split("\\n\\n");
+    for (int i = 0; i < descriptionParagraphs.length; i++) {
+      boolean startsWithItemizedList = descriptionParagraphs[i].startsWith(" * ");
+      // Split by listed items, then join newlines.
+      List<String> listItems =
+          Stream.of(descriptionParagraphs[i].split("\\n \\*"))
+              .map(s -> s.replace("\n", ""))
+              .collect(Collectors.toList());
+      if (startsWithItemizedList) {
+        // Remove the first asterisk.
+        listItems.set(0, listItems.get(0).substring(2));
+      }
+      if (!startsWithItemizedList) {
+        if (i == 0) {
+          if (!Strings.isNullOrEmpty(firstPattern)) {
+            commentBuilder =
+                commentBuilder.addParagraph(String.format(firstPattern, listItems.get(0)));
+          } else {
+            commentBuilder = commentBuilder.addParagraph(listItems.get(0));
+          }
+        } else {
+          commentBuilder = commentBuilder.addParagraph(listItems.get(0));
+        }
+      }
+      if (listItems.size() > 1 || startsWithItemizedList) {
+        commentBuilder =
+            commentBuilder.addUnorderedList(
+                listItems.subList(startsWithItemizedList ? 0 : 1, listItems.size()));
+      }
+    }
+
+    return commentBuilder;
+  }
 }
diff --git a/src/main/java/com/google/api/generator/gapic/model/SourceCodeInfoLocation.java b/src/main/java/com/google/api/generator/gapic/model/SourceCodeInfoLocation.java
index fca6e7c791..f39cb39530 100644
--- a/src/main/java/com/google/api/generator/gapic/model/SourceCodeInfoLocation.java
+++ b/src/main/java/com/google/api/generator/gapic/model/SourceCodeInfoLocation.java
@@ -14,8 +14,6 @@
 
 package com.google.api.generator.gapic.model;
 
-import com.google.common.escape.Escaper;
-import com.google.common.escape.Escapers;
 import com.google.protobuf.DescriptorProtos.SourceCodeInfo.Location;
 import javax.annotation.Nonnull;
 
@@ -24,9 +22,6 @@
  * additional documentation on descriptor.proto.
  */
 public class SourceCodeInfoLocation {
-  // Not a singleton because of nested-class instantiation mechanics.
-  private final NewlineEscaper ESCAPER = new NewlineEscaper();
-
   @Nonnull private final Location location;
 
   private SourceCodeInfoLocation(Location location) {
@@ -50,15 +45,6 @@ public String getLeadingDetachedComments(int index) {
   }
 
   private String processProtobufComment(String s) {
-    return ESCAPER.escape(s).trim();
-  }
-
-  private class NewlineEscaper extends Escaper {
-    private final Escaper charEscaper = Escapers.builder().addEscape('\n', "").build();
-
-    @Override
-    public String escape(String sourceString) {
-      return charEscaper.escape(sourceString);
-    }
+    return s.trim();
   }
 }
diff --git a/src/test/java/com/google/api/generator/gapic/protoparser/SourceCodeInfoParserTest.java b/src/test/java/com/google/api/generator/gapic/protoparser/SourceCodeInfoParserTest.java
index b8daabefeb..a33f20c782 100644
--- a/src/test/java/com/google/api/generator/gapic/protoparser/SourceCodeInfoParserTest.java
+++ b/src/test/java/com/google/api/generator/gapic/protoparser/SourceCodeInfoParserTest.java
@@ -48,7 +48,7 @@ public void setUp() throws Exception {
   public void getServiceInfo() {
     SourceCodeInfoLocation location = parser.getLocation(protoFile.findServiceByName("FooService"));
     assertEquals(
-        "This is a service description. It takes up multiple lines, like so.",
+        "This is a service description.\n It takes up multiple lines, like so.",
         location.getLeadingComments());
 
     location = parser.getLocation(protoFile.findServiceByName("BarService"));
@@ -60,7 +60,7 @@ public void getMethodInfo() {
     ServiceDescriptor service = protoFile.findServiceByName("FooService");
     SourceCodeInfoLocation location = parser.getLocation(service.findMethodByName("FooMethod"));
     assertEquals(
-        "FooMethod does something. This comment also takes up multiple lines.",
+        "FooMethod does something.\n This comment also takes up multiple lines.",
         location.getLeadingComments());
 
     service = protoFile.findServiceByName("BarService");
@@ -73,13 +73,13 @@ public void getOuterMessageInfo() {
     Descriptor message = protoFile.findMessageTypeByName("FooMessage");
     SourceCodeInfoLocation location = parser.getLocation(message);
     assertEquals(
-        "This is a message descxription. Lorum ipsum dolor sit amet consectetur adipiscing elit.",
+        "This is a message descxription.\n Lorum ipsum dolor sit amet consectetur adipiscing elit.",
         location.getLeadingComments());
 
     // Fields.
     location = parser.getLocation(message.findFieldByName("field_one"));
     assertEquals(
-        "This is a field description for field_one. And here is the second line of that"
+        "This is a field description for field_one.\n And here is the second line of that"
             + " description.",
         location.getLeadingComments());
     assertEquals("A field trailing comment.", location.getTrailingComments());
diff --git a/test/integration/goldens/logging/ConfigServiceV2Client.java b/test/integration/goldens/logging/ConfigServiceV2Client.java
index 930d4fa1c9..7e0b805328 100644
--- a/test/integration/goldens/logging/ConfigServiceV2Client.java
+++ b/test/integration/goldens/logging/ConfigServiceV2Client.java
@@ -300,10 +300,15 @@ public final UnaryCallable<GetBucketRequest, LogBucket> getBucketCallable() {
   // AUTO-GENERATED DOCUMENTATION AND METHOD.
   /**
    * Updates a bucket. This method replaces the following fields in the existing bucket with values
-   * from the new bucket: `retention_period` If the retention period is decreased and the bucket is
-   * locked, FAILED_PRECONDITION will be returned. If the bucket has a LifecycleState of
-   * DELETE_REQUESTED, FAILED_PRECONDITION will be returned. A buckets region may not be modified
-   * after it is created. This method is in Beta.
+   * from the new bucket: `retention_period`
+   *
+   * <p>

If the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be + * returned. + * + *

If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be + * returned. + * + *

A buckets region may not be modified after it is created. This method is in Beta. * *

Sample code: * @@ -317,10 +322,15 @@ public final LogBucket updateBucket(UpdateBucketRequest request) { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Updates a bucket. This method replaces the following fields in the existing bucket with values - * from the new bucket: `retention_period` If the retention period is decreased and the bucket is - * locked, FAILED_PRECONDITION will be returned. If the bucket has a LifecycleState of - * DELETE_REQUESTED, FAILED_PRECONDITION will be returned. A buckets region may not be modified - * after it is created. This method is in Beta. + * from the new bucket: `retention_period` + * + *

If the retention period is decreased and the bucket is locked, FAILED_PRECONDITION will be + * returned. + * + *

If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION will be + * returned. + * + *

A buckets region may not be modified after it is created. This method is in Beta. * *

Sample code: */ @@ -672,8 +682,10 @@ public final UnaryCallable createSinkCallable() { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Updates a sink. This method replaces the following fields in the existing sink with values from - * the new sink: `destination`, and `filter`. The updated sink might also have a new - * `writer_identity`; see the `unique_writer_identity` field. + * the new sink: `destination`, and `filter`. + * + *

The updated sink might also have a new `writer_identity`; see the `unique_writer_identity` + * field. * *

Sample code: * @@ -698,8 +710,10 @@ public final LogSink updateSink(LogSinkName sinkName, LogSink sink) { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Updates a sink. This method replaces the following fields in the existing sink with values from - * the new sink: `destination`, and `filter`. The updated sink might also have a new - * `writer_identity`; see the `unique_writer_identity` field. + * the new sink: `destination`, and `filter`. + * + *

The updated sink might also have a new `writer_identity`; see the `unique_writer_identity` + * field. * *

Sample code: * @@ -721,8 +735,10 @@ public final LogSink updateSink(String sinkName, LogSink sink) { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Updates a sink. This method replaces the following fields in the existing sink with values from - * the new sink: `destination`, and `filter`. The updated sink might also have a new - * `writer_identity`; see the `unique_writer_identity` field. + * the new sink: `destination`, and `filter`. + * + *

The updated sink might also have a new `writer_identity`; see the `unique_writer_identity` + * field. * *

Sample code: * @@ -757,8 +773,10 @@ public final LogSink updateSink(LogSinkName sinkName, LogSink sink, FieldMask up // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Updates a sink. This method replaces the following fields in the existing sink with values from - * the new sink: `destination`, and `filter`. The updated sink might also have a new - * `writer_identity`; see the `unique_writer_identity` field. + * the new sink: `destination`, and `filter`. + * + *

The updated sink might also have a new `writer_identity`; see the `unique_writer_identity` + * field. * *

Sample code: * @@ -793,8 +811,10 @@ public final LogSink updateSink(String sinkName, LogSink sink, FieldMask updateM // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Updates a sink. This method replaces the following fields in the existing sink with values from - * the new sink: `destination`, and `filter`. The updated sink might also have a new - * `writer_identity`; see the `unique_writer_identity` field. + * the new sink: `destination`, and `filter`. + * + *

The updated sink might also have a new `writer_identity`; see the `unique_writer_identity` + * field. * *

Sample code: * @@ -808,8 +828,10 @@ public final LogSink updateSink(UpdateSinkRequest request) { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Updates a sink. This method replaces the following fields in the existing sink with values from - * the new sink: `destination`, and `filter`. The updated sink might also have a new - * `writer_identity`; see the `unique_writer_identity` field. + * the new sink: `destination`, and `filter`. + * + *

The updated sink might also have a new `writer_identity`; see the `unique_writer_identity` + * field. * *

Sample code: */ @@ -1371,9 +1393,12 @@ public final UnaryCallable deleteExclusionCallabl // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Gets the Logs Router CMEK settings for the given resource. Note: CMEK for the Logs Router can - * currently only be configured for GCP organizations. Once configured, it applies to all projects - * and folders in the GCP organization. See [Enabling CMEK for Logs + * Gets the Logs Router CMEK settings for the given resource. + * + *

Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once + * configured, it applies to all projects and folders in the GCP organization. + * + *

See [Enabling CMEK for Logs * Router](https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. * *

Sample code: @@ -1387,9 +1412,12 @@ public final CmekSettings getCmekSettings(GetCmekSettingsRequest request) { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Gets the Logs Router CMEK settings for the given resource. Note: CMEK for the Logs Router can - * currently only be configured for GCP organizations. Once configured, it applies to all projects - * and folders in the GCP organization. See [Enabling CMEK for Logs + * Gets the Logs Router CMEK settings for the given resource. + * + *

Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once + * configured, it applies to all projects and folders in the GCP organization. + * + *

See [Enabling CMEK for Logs * Router](https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. * *

Sample code: @@ -1400,13 +1428,17 @@ public final UnaryCallable getCmekSettings // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Updates the Logs Router CMEK settings for the given resource. Note: CMEK for the Logs Router - * can currently only be configured for GCP organizations. Once configured, it applies to all - * projects and folders in the GCP organization. - * [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings] will fail if 1) + * Updates the Logs Router CMEK settings for the given resource. + * + *

Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once + * configured, it applies to all projects and folders in the GCP organization. + * + *

[UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings] will fail if 1) * `kms_key_name` is invalid, or 2) the associated service account does not have the required * `roles/cloudkms.cryptoKeyEncrypterDecrypter` role assigned for the key, or 3) access to the key - * is disabled. See [Enabling CMEK for Logs + * is disabled. + * + *

See [Enabling CMEK for Logs * Router](https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. * *

Sample code: @@ -1420,13 +1452,17 @@ public final CmekSettings updateCmekSettings(UpdateCmekSettingsRequest request) // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Updates the Logs Router CMEK settings for the given resource. Note: CMEK for the Logs Router - * can currently only be configured for GCP organizations. Once configured, it applies to all - * projects and folders in the GCP organization. - * [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings] will fail if 1) + * Updates the Logs Router CMEK settings for the given resource. + * + *

Note: CMEK for the Logs Router can currently only be configured for GCP organizations. Once + * configured, it applies to all projects and folders in the GCP organization. + * + *

[UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings] will fail if 1) * `kms_key_name` is invalid, or 2) the associated service account does not have the required * `roles/cloudkms.cryptoKeyEncrypterDecrypter` role assigned for the key, or 3) access to the key - * is disabled. See [Enabling CMEK for Logs + * is disabled. + * + *

See [Enabling CMEK for Logs * Router](https://cloud.google.com/logging/docs/routing/managed-encryption) for more information. * *

Sample code: diff --git a/test/integration/goldens/redis/CloudRedisClient.java b/test/integration/goldens/redis/CloudRedisClient.java index 9ee1fc706f..305a9d0e8f 100644 --- a/test/integration/goldens/redis/CloudRedisClient.java +++ b/test/integration/goldens/redis/CloudRedisClient.java @@ -43,15 +43,26 @@ // AUTO-GENERATED DOCUMENTATION AND CLASS. /** - * Service Description: Configures and manages Cloud Memorystore for Redis instances Google Cloud - * Memorystore for Redis v1 The `redis.googleapis.com` service implements the Google Cloud - * Memorystore for Redis API and defines the following resource model for managing Redis instances: - * * The service works with a collection of cloud projects, named: `/projects/*` * Each - * project has a collection of available locations, named: `/locations/*` * Each location - * has a collection of Redis instances, named: `/instances/*` * As such, Redis instances are - * resources of the form: `/projects/{project_id}/locations/{location_id}/instances/{instance_id}` - * Note that location_id must be referring to a GCP `region`; for example: * - * `projects/redpepper-1290/locations/us-central1/instances/my-redis` + * Service Description: Configures and manages Cloud Memorystore for Redis instances + * + *

Google Cloud Memorystore for Redis v1 + * + *

The `redis.googleapis.com` service implements the Google Cloud Memorystore for Redis API and + * defines the following resource model for managing Redis instances: + * + *

    + *
  • The service works with a collection of cloud projects, named: `/projects/*` + *
  • Each project has a collection of available locations, named: `/locations/*` + *
  • Each location has a collection of Redis instances, named: `/instances/*` + *
  • As such, Redis instances are resources of the form: + * `/projects/{project_id}/locations/{location_id}/instances/{instance_id}` + *
+ * + *

Note that location_id must be referring to a GCP `region`; for example: + * + *

    + *
  • `projects/redpepper-1290/locations/us-central1/instances/my-redis` + *
* *

This class provides the ability to make remote calls to the backing service through method * calls that map to API methods. Sample code to get started: @@ -152,10 +163,16 @@ public final OperationsClient getOperationsClient() { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Lists all Redis instances owned by a project in either the specified location (region) or all - * locations. The location should have the following format: * - * `projects/{project_id}/locations/{location_id}` If `location_id` is specified as `-` - * (wildcard), then all regions available to the project are queried, and the results are - * aggregated. + * locations. + * + *

The location should have the following format: + * + *

    + *
  • `projects/{project_id}/locations/{location_id}` + *
+ * + *

If `location_id` is specified as `-` (wildcard), then all regions available to the project + * are queried, and the results are aggregated. * *

Sample code: * @@ -174,10 +191,16 @@ public final ListInstancesPagedResponse listInstances(LocationName parent) { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Lists all Redis instances owned by a project in either the specified location (region) or all - * locations. The location should have the following format: * - * `projects/{project_id}/locations/{location_id}` If `location_id` is specified as `-` - * (wildcard), then all regions available to the project are queried, and the results are - * aggregated. + * locations. + * + *

The location should have the following format: + * + *

    + *
  • `projects/{project_id}/locations/{location_id}` + *
+ * + *

If `location_id` is specified as `-` (wildcard), then all regions available to the project + * are queried, and the results are aggregated. * *

Sample code: * @@ -193,10 +216,16 @@ public final ListInstancesPagedResponse listInstances(String parent) { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Lists all Redis instances owned by a project in either the specified location (region) or all - * locations. The location should have the following format: * - * `projects/{project_id}/locations/{location_id}` If `location_id` is specified as `-` - * (wildcard), then all regions available to the project are queried, and the results are - * aggregated. + * locations. + * + *

The location should have the following format: + * + *

    + *
  • `projects/{project_id}/locations/{location_id}` + *
+ * + *

If `location_id` is specified as `-` (wildcard), then all regions available to the project + * are queried, and the results are aggregated. * *

Sample code: * @@ -210,10 +239,16 @@ public final ListInstancesPagedResponse listInstances(ListInstancesRequest reque // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Lists all Redis instances owned by a project in either the specified location (region) or all - * locations. The location should have the following format: * - * `projects/{project_id}/locations/{location_id}` If `location_id` is specified as `-` - * (wildcard), then all regions available to the project are queried, and the results are - * aggregated. + * locations. + * + *

The location should have the following format: + * + *

    + *
  • `projects/{project_id}/locations/{location_id}` + *
+ * + *

If `location_id` is specified as `-` (wildcard), then all regions available to the project + * are queried, and the results are aggregated. * *

Sample code: */ @@ -225,10 +260,16 @@ public final ListInstancesPagedResponse listInstances(ListInstancesRequest reque // AUTO-GENERATED DOCUMENTATION AND METHOD. /** * Lists all Redis instances owned by a project in either the specified location (region) or all - * locations. The location should have the following format: * - * `projects/{project_id}/locations/{location_id}` If `location_id` is specified as `-` - * (wildcard), then all regions available to the project are queried, and the results are - * aggregated. + * locations. + * + *

The location should have the following format: + * + *

    + *
  • `projects/{project_id}/locations/{location_id}` + *
+ * + *

If `location_id` is specified as `-` (wildcard), then all regions available to the project + * are queried, and the results are aggregated. * *

Sample code: */ @@ -296,13 +337,18 @@ public final UnaryCallable getInstanceCallable() { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates a Redis instance based on the specified tier and memory size. By default, the instance - * is accessible from the project's [default network](https://cloud.google.com/vpc/docs/vpc). The - * creation is executed asynchronously and callers may check the returned operation to track its - * progress. Once the operation is completed the Redis instance will be fully functional. - * Completed longrunning.Operation will contain the new instance object in the response field. The - * returned operation is automatically deleted after a few hours, so there is no need to call - * DeleteOperation. + * Creates a Redis instance based on the specified tier and memory size. + * + *

By default, the instance is accessible from the project's [default + * network](https://cloud.google.com/vpc/docs/vpc). + * + *

The creation is executed asynchronously and callers may check the returned operation to + * track its progress. Once the operation is completed the Redis instance will be fully + * functional. Completed longrunning.Operation will contain the new instance object in the + * response field. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: * @@ -328,13 +374,18 @@ public final OperationFuture createInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates a Redis instance based on the specified tier and memory size. By default, the instance - * is accessible from the project's [default network](https://cloud.google.com/vpc/docs/vpc). The - * creation is executed asynchronously and callers may check the returned operation to track its - * progress. Once the operation is completed the Redis instance will be fully functional. - * Completed longrunning.Operation will contain the new instance object in the response field. The - * returned operation is automatically deleted after a few hours, so there is no need to call - * DeleteOperation. + * Creates a Redis instance based on the specified tier and memory size. + * + *

By default, the instance is accessible from the project's [default + * network](https://cloud.google.com/vpc/docs/vpc). + * + *

The creation is executed asynchronously and callers may check the returned operation to + * track its progress. Once the operation is completed the Redis instance will be fully + * functional. Completed longrunning.Operation will contain the new instance object in the + * response field. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: * @@ -360,13 +411,18 @@ public final OperationFuture createInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates a Redis instance based on the specified tier and memory size. By default, the instance - * is accessible from the project's [default network](https://cloud.google.com/vpc/docs/vpc). The - * creation is executed asynchronously and callers may check the returned operation to track its - * progress. Once the operation is completed the Redis instance will be fully functional. - * Completed longrunning.Operation will contain the new instance object in the response field. The - * returned operation is automatically deleted after a few hours, so there is no need to call - * DeleteOperation. + * Creates a Redis instance based on the specified tier and memory size. + * + *

By default, the instance is accessible from the project's [default + * network](https://cloud.google.com/vpc/docs/vpc). + * + *

The creation is executed asynchronously and callers may check the returned operation to + * track its progress. Once the operation is completed the Redis instance will be fully + * functional. Completed longrunning.Operation will contain the new instance object in the + * response field. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: * @@ -380,13 +436,18 @@ public final OperationFuture createInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates a Redis instance based on the specified tier and memory size. By default, the instance - * is accessible from the project's [default network](https://cloud.google.com/vpc/docs/vpc). The - * creation is executed asynchronously and callers may check the returned operation to track its - * progress. Once the operation is completed the Redis instance will be fully functional. - * Completed longrunning.Operation will contain the new instance object in the response field. The - * returned operation is automatically deleted after a few hours, so there is no need to call - * DeleteOperation. + * Creates a Redis instance based on the specified tier and memory size. + * + *

By default, the instance is accessible from the project's [default + * network](https://cloud.google.com/vpc/docs/vpc). + * + *

The creation is executed asynchronously and callers may check the returned operation to + * track its progress. Once the operation is completed the Redis instance will be fully + * functional. Completed longrunning.Operation will contain the new instance object in the + * response field. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: */ @@ -397,13 +458,18 @@ public final OperationFuture createInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates a Redis instance based on the specified tier and memory size. By default, the instance - * is accessible from the project's [default network](https://cloud.google.com/vpc/docs/vpc). The - * creation is executed asynchronously and callers may check the returned operation to track its - * progress. Once the operation is completed the Redis instance will be fully functional. - * Completed longrunning.Operation will contain the new instance object in the response field. The - * returned operation is automatically deleted after a few hours, so there is no need to call - * DeleteOperation. + * Creates a Redis instance based on the specified tier and memory size. + * + *

By default, the instance is accessible from the project's [default + * network](https://cloud.google.com/vpc/docs/vpc). + * + *

The creation is executed asynchronously and callers may check the returned operation to + * track its progress. Once the operation is completed the Redis instance will be fully + * functional. Completed longrunning.Operation will contain the new instance object in the + * response field. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: */ @@ -413,9 +479,10 @@ public final UnaryCallable createInstanceCalla // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Updates the metadata and configuration of a specific Redis instance. Completed - * longrunning.Operation will contain the new instance object in the response field. The returned - * operation is automatically deleted after a few hours, so there is no need to call + * Updates the metadata and configuration of a specific Redis instance. + * + *

Completed longrunning.Operation will contain the new instance object in the response field. + * The returned operation is automatically deleted after a few hours, so there is no need to call * DeleteOperation. * *

Sample code: @@ -436,9 +503,10 @@ public final OperationFuture updateInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Updates the metadata and configuration of a specific Redis instance. Completed - * longrunning.Operation will contain the new instance object in the response field. The returned - * operation is automatically deleted after a few hours, so there is no need to call + * Updates the metadata and configuration of a specific Redis instance. + * + *

Completed longrunning.Operation will contain the new instance object in the response field. + * The returned operation is automatically deleted after a few hours, so there is no need to call * DeleteOperation. * *

Sample code: @@ -453,9 +521,10 @@ public final OperationFuture updateInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Updates the metadata and configuration of a specific Redis instance. Completed - * longrunning.Operation will contain the new instance object in the response field. The returned - * operation is automatically deleted after a few hours, so there is no need to call + * Updates the metadata and configuration of a specific Redis instance. + * + *

Completed longrunning.Operation will contain the new instance object in the response field. + * The returned operation is automatically deleted after a few hours, so there is no need to call * DeleteOperation. * *

Sample code: @@ -467,9 +536,10 @@ public final OperationFuture updateInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Updates the metadata and configuration of a specific Redis instance. Completed - * longrunning.Operation will contain the new instance object in the response field. The returned - * operation is automatically deleted after a few hours, so there is no need to call + * Updates the metadata and configuration of a specific Redis instance. + * + *

Completed longrunning.Operation will contain the new instance object in the response field. + * The returned operation is automatically deleted after a few hours, so there is no need to call * DeleteOperation. * *

Sample code: @@ -556,10 +626,13 @@ public final UnaryCallable upgradeInstanceCal // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Import a Redis RDB snapshot file from Cloud Storage into a Redis instance. Redis may stop - * serving during this operation. Instance state will be IMPORTING for entire operation. When - * complete, the instance will contain only data from the imported file. The returned operation is - * automatically deleted after a few hours, so there is no need to call DeleteOperation. + * Import a Redis RDB snapshot file from Cloud Storage into a Redis instance. + * + *

Redis may stop serving during this operation. Instance state will be IMPORTING for entire + * operation. When complete, the instance will contain only data from the imported file. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: * @@ -578,10 +651,13 @@ public final OperationFuture importInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Import a Redis RDB snapshot file from Cloud Storage into a Redis instance. Redis may stop - * serving during this operation. Instance state will be IMPORTING for entire operation. When - * complete, the instance will contain only data from the imported file. The returned operation is - * automatically deleted after a few hours, so there is no need to call DeleteOperation. + * Import a Redis RDB snapshot file from Cloud Storage into a Redis instance. + * + *

Redis may stop serving during this operation. Instance state will be IMPORTING for entire + * operation. When complete, the instance will contain only data from the imported file. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: * @@ -595,10 +671,13 @@ public final OperationFuture importInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Import a Redis RDB snapshot file from Cloud Storage into a Redis instance. Redis may stop - * serving during this operation. Instance state will be IMPORTING for entire operation. When - * complete, the instance will contain only data from the imported file. The returned operation is - * automatically deleted after a few hours, so there is no need to call DeleteOperation. + * Import a Redis RDB snapshot file from Cloud Storage into a Redis instance. + * + *

Redis may stop serving during this operation. Instance state will be IMPORTING for entire + * operation. When complete, the instance will contain only data from the imported file. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: */ @@ -609,10 +688,13 @@ public final OperationFuture importInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Import a Redis RDB snapshot file from Cloud Storage into a Redis instance. Redis may stop - * serving during this operation. Instance state will be IMPORTING for entire operation. When - * complete, the instance will contain only data from the imported file. The returned operation is - * automatically deleted after a few hours, so there is no need to call DeleteOperation. + * Import a Redis RDB snapshot file from Cloud Storage into a Redis instance. + * + *

Redis may stop serving during this operation. Instance state will be IMPORTING for entire + * operation. When complete, the instance will contain only data from the imported file. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: */ @@ -622,9 +704,12 @@ public final UnaryCallable importInstanceCalla // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Export Redis instance data into a Redis RDB format file in Cloud Storage. Redis will continue - * serving during this operation. The returned operation is automatically deleted after a few - * hours, so there is no need to call DeleteOperation. + * Export Redis instance data into a Redis RDB format file in Cloud Storage. + * + *

Redis will continue serving during this operation. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: * @@ -643,9 +728,12 @@ public final OperationFuture exportInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Export Redis instance data into a Redis RDB format file in Cloud Storage. Redis will continue - * serving during this operation. The returned operation is automatically deleted after a few - * hours, so there is no need to call DeleteOperation. + * Export Redis instance data into a Redis RDB format file in Cloud Storage. + * + *

Redis will continue serving during this operation. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: * @@ -659,9 +747,12 @@ public final OperationFuture exportInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Export Redis instance data into a Redis RDB format file in Cloud Storage. Redis will continue - * serving during this operation. The returned operation is automatically deleted after a few - * hours, so there is no need to call DeleteOperation. + * Export Redis instance data into a Redis RDB format file in Cloud Storage. + * + *

Redis will continue serving during this operation. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: */ @@ -672,9 +763,12 @@ public final OperationFuture exportInstanceAsync( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Export Redis instance data into a Redis RDB format file in Cloud Storage. Redis will continue - * serving during this operation. The returned operation is automatically deleted after a few - * hours, so there is no need to call DeleteOperation. + * Export Redis instance data into a Redis RDB format file in Cloud Storage. + * + *

Redis will continue serving during this operation. + * + *

The returned operation is automatically deleted after a few hours, so there is no need to + * call DeleteOperation. * *

Sample code: */ diff --git a/test/integration/goldens/redis/package-info.java b/test/integration/goldens/redis/package-info.java index 8e1e2d6fb8..702bad9fc3 100644 --- a/test/integration/goldens/redis/package-info.java +++ b/test/integration/goldens/redis/package-info.java @@ -21,15 +21,26 @@ * *

======================= CloudRedisClient ======================= * - *

Service Description: Configures and manages Cloud Memorystore for Redis instances Google Cloud - * Memorystore for Redis v1 The `redis.googleapis.com` service implements the Google Cloud - * Memorystore for Redis API and defines the following resource model for managing Redis instances: - * * The service works with a collection of cloud projects, named: `/projects/*` * Each project has - * a collection of available locations, named: `/locations/*` * Each location has a collection of - * Redis instances, named: `/instances/*` * As such, Redis instances are resources of the form: - * `/projects/{project_id}/locations/{location_id}/instances/{instance_id}` Note that location_id - * must be referring to a GCP `region`; for example: * - * `projects/redpepper-1290/locations/us-central1/instances/my-redis` + *

Service Description: Configures and manages Cloud Memorystore for Redis instances + * + *

Google Cloud Memorystore for Redis v1 + * + *

The `redis.googleapis.com` service implements the Google Cloud Memorystore for Redis API and + * defines the following resource model for managing Redis instances: + * + *

    + *
  • The service works with a collection of cloud projects, named: `/projects/*` + *
  • Each project has a collection of available locations, named: `/locations/*` + *
  • Each location has a collection of Redis instances, named: `/instances/*` + *
  • As such, Redis instances are resources of the form: + * `/projects/{project_id}/locations/{location_id}/instances/{instance_id}` + *
+ * + *

Note that location_id must be referring to a GCP `region`; for example: + * + *

    + *
  • `projects/redpepper-1290/locations/us-central1/instances/my-redis` + *
* *

Sample for CloudRedisClient: */
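
The comment handling added to ClientLibraryPackageInfoComposer and ServiceClientCommentComposer above can be exercised outside the generator. The sketch below is a simplified, self-contained approximation of processProtobufComment, not the generator's actual API: the class name, the println-based output, and the sample comment string are all illustrative. It shows the core idea of the patch — a protobuf comment is split on blank lines into paragraphs, and a paragraph whose lines begin with " * " is emitted as list items rather than prose.

```java
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/** Standalone sketch of the paragraph/list splitting above; not the generator's actual API. */
public class CommentSplitSketch {

  static void process(String protobufComment) {
    // Paragraphs in a proto comment are separated by blank lines.
    String[] paragraphs = protobufComment.split("\\n\\n");
    for (int i = 0; i < paragraphs.length; i++) {
      boolean startsWithItemizedList = paragraphs[i].startsWith(" * ");
      // Split on the " *" item markers, then drop the remaining hard line breaks.
      List<String> listItems =
          Stream.of(paragraphs[i].split("\\n \\*"))
              .map(s -> s.replace("\n", ""))
              .collect(Collectors.toList());
      if (startsWithItemizedList) {
        // The first element still carries its leading "* " marker; strip it.
        listItems.set(0, listItems.get(0).substring(2));
      }
      if (!startsWithItemizedList) {
        // The first chunk of a prose paragraph becomes a <p> block.
        System.out.println("<p>" + listItems.get(0).trim());
      }
      if (listItems.size() > 1 || startsWithItemizedList) {
        // Any remaining chunks become an unordered list.
        System.out.println("<ul>");
        for (String item : listItems.subList(startsWithItemizedList ? 0 : 1, listItems.size())) {
          System.out.println("  <li>" + item.trim());
        }
        System.out.println("</ul>");
      }
    }
  }

  public static void main(String[] args) {
    process(
        "Configures and manages Cloud Memorystore for Redis instances\n"
            + "\n"
            + " * The service works with a collection of cloud projects, named: `/projects/*`\n"
            + " * Each project has a collection of available locations, named: `/locations/*`");
  }
}
```

Running this prints a single <p> block for the prose paragraph and a <ul>/<li> block for the two " * " lines, which is the shape of the regenerated Javadoc in the goldens above.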
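
Relatedly, SourceCodeInfoLocation now only trims protobuf comments instead of escaping away their newlines, which is what the updated SourceCodeInfoParserTest expectations capture. A minimal before/after illustration follows; the class and variable names are hypothetical, and only the two string operations mirror the old and new code paths.

```java
public class TrimOnlySketch {
  public static void main(String[] args) {
    // A leading comment roughly as protoc reports it for a multi-line proto comment.
    String raw = " This is a service description.\n It takes up multiple lines, like so.\n";

    // Old behavior: the NewlineEscaper removed every '\n' before trimming.
    String before = raw.replace("\n", "").trim();
    // New behavior: only surrounding whitespace is trimmed, so interior newlines survive
    // and the composers can split paragraphs and list items on them.
    String after = raw.trim();

    System.out.println(before); // "This is a service description. It takes up multiple lines, like so."
    System.out.println(after);  // Prints two lines: the '\n' before " It takes up..." is preserved.
  }
}
```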