diff --git a/google-api-grpc/grpc-google-cloud-texttospeech-v1/pom.xml b/google-api-grpc/grpc-google-cloud-texttospeech-v1/pom.xml new file mode 100644 index 000000000000..e19d75119bc2 --- /dev/null +++ b/google-api-grpc/grpc-google-cloud-texttospeech-v1/pom.xml @@ -0,0 +1,31 @@ + + 4.0.0 + grpc-google-cloud-texttospeech-v1 + 0.14.1-SNAPSHOT + grpc-google-cloud-texttospeech-v1 + GRPC library for grpc-google-cloud-texttospeech-v1 + + com.google.api.grpc + google-api-grpc + 0.14.1-SNAPSHOT + + + + io.grpc + grpc-stub + compile + + + io.grpc + grpc-protobuf + compile + + + com.google.api.grpc + proto-google-cloud-texttospeech-v1 + compile + + + diff --git a/google-api-grpc/grpc-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechGrpc.java b/google-api-grpc/grpc-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechGrpc.java new file mode 100644 index 000000000000..555b17ebc353 --- /dev/null +++ b/google-api-grpc/grpc-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechGrpc.java @@ -0,0 +1,413 @@ +package com.google.cloud.texttospeech.v1; + +import static io.grpc.MethodDescriptor.generateFullMethodName; +import static io.grpc.stub.ClientCalls.asyncBidiStreamingCall; +import static io.grpc.stub.ClientCalls.asyncClientStreamingCall; +import static io.grpc.stub.ClientCalls.asyncServerStreamingCall; +import static io.grpc.stub.ClientCalls.asyncUnaryCall; +import static io.grpc.stub.ClientCalls.blockingServerStreamingCall; +import static io.grpc.stub.ClientCalls.blockingUnaryCall; +import static io.grpc.stub.ClientCalls.futureUnaryCall; +import static io.grpc.stub.ServerCalls.asyncBidiStreamingCall; +import static io.grpc.stub.ServerCalls.asyncClientStreamingCall; +import static io.grpc.stub.ServerCalls.asyncServerStreamingCall; +import static io.grpc.stub.ServerCalls.asyncUnaryCall; +import static io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall; +import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; + +/** + *
+ * Service that implements Google Cloud Text-to-Speech API.
+ * 
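+ * A minimal usage sketch (illustrative only, not emitted by the code generator; the
+ * channel target is an assumption and credential setup is omitted):
+ * 
+ *   // Build a channel, create a blocking stub, and list the available voices.
+ *   io.grpc.ManagedChannel channel =
+ *       io.grpc.ManagedChannelBuilder.forTarget("texttospeech.googleapis.com:443").build();
+ *   TextToSpeechGrpc.TextToSpeechBlockingStub stub = TextToSpeechGrpc.newBlockingStub(channel);
+ *   ListVoicesResponse voices = stub.listVoices(ListVoicesRequest.getDefaultInstance());
+ *   channel.shutdown();
+ * 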
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler (version 1.10.0)", + comments = "Source: google/cloud/texttospeech/v1/cloud_tts.proto") +public final class TextToSpeechGrpc { + + private TextToSpeechGrpc() {} + + public static final String SERVICE_NAME = "google.cloud.texttospeech.v1.TextToSpeech"; + + // Static method descriptors that strictly reflect the proto. + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getListVoicesMethod()} instead. + public static final io.grpc.MethodDescriptor METHOD_LIST_VOICES = getListVoicesMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getListVoicesMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getListVoicesMethod() { + return getListVoicesMethodHelper(); + } + + private static io.grpc.MethodDescriptor getListVoicesMethodHelper() { + io.grpc.MethodDescriptor getListVoicesMethod; + if ((getListVoicesMethod = TextToSpeechGrpc.getListVoicesMethod) == null) { + synchronized (TextToSpeechGrpc.class) { + if ((getListVoicesMethod = TextToSpeechGrpc.getListVoicesMethod) == null) { + TextToSpeechGrpc.getListVoicesMethod = getListVoicesMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.texttospeech.v1.TextToSpeech", "ListVoices")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.texttospeech.v1.ListVoicesRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.texttospeech.v1.ListVoicesResponse.getDefaultInstance())) + .setSchemaDescriptor(new TextToSpeechMethodDescriptorSupplier("ListVoices")) + .build(); + } + } + } + return getListVoicesMethod; + } + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + @java.lang.Deprecated // Use {@link #getSynthesizeSpeechMethod()} instead. 
+ public static final io.grpc.MethodDescriptor METHOD_SYNTHESIZE_SPEECH = getSynthesizeSpeechMethodHelper(); + + private static volatile io.grpc.MethodDescriptor getSynthesizeSpeechMethod; + + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901") + public static io.grpc.MethodDescriptor getSynthesizeSpeechMethod() { + return getSynthesizeSpeechMethodHelper(); + } + + private static io.grpc.MethodDescriptor getSynthesizeSpeechMethodHelper() { + io.grpc.MethodDescriptor getSynthesizeSpeechMethod; + if ((getSynthesizeSpeechMethod = TextToSpeechGrpc.getSynthesizeSpeechMethod) == null) { + synchronized (TextToSpeechGrpc.class) { + if ((getSynthesizeSpeechMethod = TextToSpeechGrpc.getSynthesizeSpeechMethod) == null) { + TextToSpeechGrpc.getSynthesizeSpeechMethod = getSynthesizeSpeechMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName( + "google.cloud.texttospeech.v1.TextToSpeech", "SynthesizeSpeech")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse.getDefaultInstance())) + .setSchemaDescriptor(new TextToSpeechMethodDescriptorSupplier("SynthesizeSpeech")) + .build(); + } + } + } + return getSynthesizeSpeechMethod; + } + + /** + * Creates a new async stub that supports all call types for the service + */ + public static TextToSpeechStub newStub(io.grpc.Channel channel) { + return new TextToSpeechStub(channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static TextToSpeechBlockingStub newBlockingStub( + io.grpc.Channel channel) { + return new TextToSpeechBlockingStub(channel); + } + + /** + * Creates a new ListenableFuture-style stub that supports unary calls on the service + */ + public static TextToSpeechFutureStub newFutureStub( + io.grpc.Channel channel) { + return new TextToSpeechFutureStub(channel); + } + + /** + *
+   * Service that implements Google Cloud Text-to-Speech API.
+   * 
+ */ + public static abstract class TextToSpeechImplBase implements io.grpc.BindableService { + + /** + *
+     * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice]
+     * supported for synthesis.
+     * 
+ */ + public void listVoices(com.google.cloud.texttospeech.v1.ListVoicesRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnimplementedUnaryCall(getListVoicesMethodHelper(), responseObserver); + } + + /** + *
+     * Synthesizes speech synchronously: receive results after all text input
+     * has been processed.
+     * 
+ */ + public void synthesizeSpeech(com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnimplementedUnaryCall(getSynthesizeSpeechMethodHelper(), responseObserver); + } + + @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getListVoicesMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.texttospeech.v1.ListVoicesRequest, + com.google.cloud.texttospeech.v1.ListVoicesResponse>( + this, METHODID_LIST_VOICES))) + .addMethod( + getSynthesizeSpeechMethodHelper(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest, + com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse>( + this, METHODID_SYNTHESIZE_SPEECH))) + .build(); + } + } + + /** + *
+   * Service that implements Google Cloud Text-to-Speech API.
+   * 
+ */ + public static final class TextToSpeechStub extends io.grpc.stub.AbstractStub { + private TextToSpeechStub(io.grpc.Channel channel) { + super(channel); + } + + private TextToSpeechStub(io.grpc.Channel channel, + io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected TextToSpeechStub build(io.grpc.Channel channel, + io.grpc.CallOptions callOptions) { + return new TextToSpeechStub(channel, callOptions); + } + + /** + *
+     * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice]
+     * supported for synthesis.
+     * 
+ */ + public void listVoices(com.google.cloud.texttospeech.v1.ListVoicesRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnaryCall( + getChannel().newCall(getListVoicesMethodHelper(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Synthesizes speech synchronously: receive results after all text input
+     * has been processed.
+     * 
+ */ + public void synthesizeSpeech(com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest request, + io.grpc.stub.StreamObserver responseObserver) { + asyncUnaryCall( + getChannel().newCall(getSynthesizeSpeechMethodHelper(), getCallOptions()), request, responseObserver); + } + } + + /** + *
+   * Service that implements Google Cloud Text-to-Speech API.
+   * 
+ */ + public static final class TextToSpeechBlockingStub extends io.grpc.stub.AbstractStub { + private TextToSpeechBlockingStub(io.grpc.Channel channel) { + super(channel); + } + + private TextToSpeechBlockingStub(io.grpc.Channel channel, + io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected TextToSpeechBlockingStub build(io.grpc.Channel channel, + io.grpc.CallOptions callOptions) { + return new TextToSpeechBlockingStub(channel, callOptions); + } + + /** + *
+     * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice]
+     * supported for synthesis.
+     * 
+ */ + public com.google.cloud.texttospeech.v1.ListVoicesResponse listVoices(com.google.cloud.texttospeech.v1.ListVoicesRequest request) { + return blockingUnaryCall( + getChannel(), getListVoicesMethodHelper(), getCallOptions(), request); + } + + /** + *
+     * Synthesizes speech synchronously: receive results after all text input
+     * has been processed.
+     * 
+ */ + public com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse synthesizeSpeech(com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest request) { + return blockingUnaryCall( + getChannel(), getSynthesizeSpeechMethodHelper(), getCallOptions(), request); + } + } + + /** + *
+   * Service that implements Google Cloud Text-to-Speech API.
+   * 
+ */ + public static final class TextToSpeechFutureStub extends io.grpc.stub.AbstractStub { + private TextToSpeechFutureStub(io.grpc.Channel channel) { + super(channel); + } + + private TextToSpeechFutureStub(io.grpc.Channel channel, + io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected TextToSpeechFutureStub build(io.grpc.Channel channel, + io.grpc.CallOptions callOptions) { + return new TextToSpeechFutureStub(channel, callOptions); + } + + /** + *
+     * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice]
+     * supported for synthesis.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture listVoices( + com.google.cloud.texttospeech.v1.ListVoicesRequest request) { + return futureUnaryCall( + getChannel().newCall(getListVoicesMethodHelper(), getCallOptions()), request); + } + + /** + *
+     * Synthesizes speech synchronously: receive results after all text input
+     * has been processed.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture synthesizeSpeech( + com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest request) { + return futureUnaryCall( + getChannel().newCall(getSynthesizeSpeechMethodHelper(), getCallOptions()), request); + } + } + + private static final int METHODID_LIST_VOICES = 0; + private static final int METHODID_SYNTHESIZE_SPEECH = 1; + + private static final class MethodHandlers implements + io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final TextToSpeechImplBase serviceImpl; + private final int methodId; + + MethodHandlers(TextToSpeechImplBase serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_LIST_VOICES: + serviceImpl.listVoices((com.google.cloud.texttospeech.v1.ListVoicesRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_SYNTHESIZE_SPEECH: + serviceImpl.synthesizeSpeech((com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + private static abstract class TextToSpeechBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier { + TextToSpeechBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("TextToSpeech"); + } + } + + private static final class TextToSpeechFileDescriptorSupplier + extends TextToSpeechBaseDescriptorSupplier { + TextToSpeechFileDescriptorSupplier() {} + } + + private static final class TextToSpeechMethodDescriptorSupplier + extends TextToSpeechBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final String methodName; + + TextToSpeechMethodDescriptorSupplier(String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (TextToSpeechGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new TextToSpeechFileDescriptorSupplier()) + .addMethod(getListVoicesMethodHelper()) + .addMethod(getSynthesizeSpeechMethodHelper()) + .build(); + } + } + } + return result; + } +} diff --git a/google-api-grpc/pom.xml 
b/google-api-grpc/pom.xml index 658712dc4177..a32cc6772202 100644 --- a/google-api-grpc/pom.xml +++ b/google-api-grpc/pom.xml @@ -356,6 +356,16 @@ grpc-google-cloud-speech-v1 0.14.1-SNAPSHOT + + com.google.api.grpc + proto-google-cloud-texttospeech-v1 + 0.14.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-texttospeech-v1 + 0.14.1-SNAPSHOT + com.google.api.grpc proto-google-cloud-texttospeech-v1beta1 @@ -512,6 +522,7 @@ grpc-google-cloud-speech-v1 grpc-google-cloud-speech-v1beta1 grpc-google-cloud-speech-v1p1beta1 + grpc-google-cloud-texttospeech-v1 grpc-google-cloud-texttospeech-v1beta1 grpc-google-cloud-trace-v1 grpc-google-cloud-trace-v2 @@ -548,6 +559,7 @@ proto-google-cloud-speech-v1 proto-google-cloud-speech-v1beta1 proto-google-cloud-speech-v1p1beta1 + proto-google-cloud-texttospeech-v1 proto-google-cloud-texttospeech-v1beta1 proto-google-cloud-trace-v1 proto-google-cloud-trace-v2 diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/pom.xml b/google-api-grpc/proto-google-cloud-texttospeech-v1/pom.xml new file mode 100644 index 000000000000..80857b2dc53e --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/pom.xml @@ -0,0 +1,31 @@ + + 4.0.0 + proto-google-cloud-texttospeech-v1 + 0.14.1-SNAPSHOT + proto-google-cloud-texttospeech-v1 + PROTO library for proto-google-cloud-texttospeech-v1 + + com.google.api.grpc + google-api-grpc + 0.14.1-SNAPSHOT + + + + com.google.protobuf + protobuf-java + compile + + + com.google.api + api-common + compile + + + com.google.api.grpc + proto-google-common-protos + compile + + + diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/AudioConfig.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/AudioConfig.java new file mode 100644 index 000000000000..321ef1b3ea24 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/AudioConfig.java @@ -0,0 +1,881 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +/** + *
+ * Description of audio data to be synthesized.
+ * 
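+ * A hypothetical configuration sketch (the encoding and numeric values are illustrative
+ * choices, not defaults):
+ * 
+ *   AudioConfig audioConfig = AudioConfig.newBuilder()
+ *       .setAudioEncoding(AudioEncoding.MP3)   // requested output format
+ *       .setSpeakingRate(1.25)                 // within the documented [0.25, 4.0] range
+ *       .setPitch(-2.0)                        // within the documented [-20.0, 20.0] range
+ *       .build();
+ * 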
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.AudioConfig} + */ +public final class AudioConfig extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.AudioConfig) + AudioConfigOrBuilder { +private static final long serialVersionUID = 0L; + // Use AudioConfig.newBuilder() to construct. + private AudioConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private AudioConfig() { + audioEncoding_ = 0; + speakingRate_ = 0D; + pitch_ = 0D; + volumeGainDb_ = 0D; + sampleRateHertz_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AudioConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + + audioEncoding_ = rawValue; + break; + } + case 17: { + + speakingRate_ = input.readDouble(); + break; + } + case 25: { + + pitch_ = input.readDouble(); + break; + } + case 33: { + + volumeGainDb_ = input.readDouble(); + break; + } + case 40: { + + sampleRateHertz_ = input.readInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_AudioConfig_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_AudioConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.AudioConfig.class, com.google.cloud.texttospeech.v1.AudioConfig.Builder.class); + } + + public static final int AUDIO_ENCODING_FIELD_NUMBER = 1; + private int audioEncoding_; + /** + *
+   * Required. The format of the requested audio byte stream.
+   * 
+ * + * .google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1; + */ + public int getAudioEncodingValue() { + return audioEncoding_; + } + /** + *
+   * Required. The format of the requested audio byte stream.
+   * 
+ * + * .google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1; + */ + public com.google.cloud.texttospeech.v1.AudioEncoding getAudioEncoding() { + com.google.cloud.texttospeech.v1.AudioEncoding result = com.google.cloud.texttospeech.v1.AudioEncoding.valueOf(audioEncoding_); + return result == null ? com.google.cloud.texttospeech.v1.AudioEncoding.UNRECOGNIZED : result; + } + + public static final int SPEAKING_RATE_FIELD_NUMBER = 2; + private double speakingRate_; + /** + *
+   * Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
+   * native speed supported by the specific voice. 2.0 is twice as fast, and
+   * 0.5 is half as fast. If unset (0.0), defaults to the native 1.0 speed. Any
+   * other values < 0.25 or > 4.0 will return an error.
+   * 
+ * + * double speaking_rate = 2; + */ + public double getSpeakingRate() { + return speakingRate_; + } + + public static final int PITCH_FIELD_NUMBER = 3; + private double pitch_; + /** + *
+   * Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
+   * semitones from the original pitch. -20 means decrease 20 semitones from the
+   * original pitch.
+   * 
+ * + * double pitch = 3; + */ + public double getPitch() { + return pitch_; + } + + public static final int VOLUME_GAIN_DB_FIELD_NUMBER = 4; + private double volumeGainDb_; + /** + *
+   * Optional volume gain (in dB) of the normal native volume supported by the
+   * specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
+   * 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
+   * will play at approximately half the amplitude of the normal native signal
+   * amplitude. A value of +6.0 (dB) will play at approximately twice the
+   * amplitude of the normal native signal amplitude. We strongly recommend not
+   * exceeding +10 (dB), as there is usually no effective increase in loudness for
+   * any value greater than that.
+   * 
+ * + * double volume_gain_db = 4; + */ + public double getVolumeGainDb() { + return volumeGainDb_; + } + + public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 5; + private int sampleRateHertz_; + /** + *
+   * The synthesis sample rate (in hertz) for this audio. Optional.  If this is
+   * different from the voice's natural sample rate, then the synthesizer will
+   * honor this request by converting to the desired sample rate (which might
+   * result in worse audio quality), unless the specified sample rate is not
+   * supported for the encoding chosen, in which case it will fail the request
+   * and return [google.rpc.Code.INVALID_ARGUMENT][].
+   * 
+ * + * int32 sample_rate_hertz = 5; + */ + public int getSampleRateHertz() { + return sampleRateHertz_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (audioEncoding_ != com.google.cloud.texttospeech.v1.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED.getNumber()) { + output.writeEnum(1, audioEncoding_); + } + if (speakingRate_ != 0D) { + output.writeDouble(2, speakingRate_); + } + if (pitch_ != 0D) { + output.writeDouble(3, pitch_); + } + if (volumeGainDb_ != 0D) { + output.writeDouble(4, volumeGainDb_); + } + if (sampleRateHertz_ != 0) { + output.writeInt32(5, sampleRateHertz_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (audioEncoding_ != com.google.cloud.texttospeech.v1.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, audioEncoding_); + } + if (speakingRate_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(2, speakingRate_); + } + if (pitch_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(3, pitch_); + } + if (volumeGainDb_ != 0D) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(4, volumeGainDb_); + } + if (sampleRateHertz_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(5, sampleRateHertz_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.texttospeech.v1.AudioConfig)) { + return super.equals(obj); + } + com.google.cloud.texttospeech.v1.AudioConfig other = (com.google.cloud.texttospeech.v1.AudioConfig) obj; + + boolean result = true; + result = result && audioEncoding_ == other.audioEncoding_; + result = result && ( + java.lang.Double.doubleToLongBits(getSpeakingRate()) + == java.lang.Double.doubleToLongBits( + other.getSpeakingRate())); + result = result && ( + java.lang.Double.doubleToLongBits(getPitch()) + == java.lang.Double.doubleToLongBits( + other.getPitch())); + result = result && ( + java.lang.Double.doubleToLongBits(getVolumeGainDb()) + == java.lang.Double.doubleToLongBits( + other.getVolumeGainDb())); + result = result && (getSampleRateHertz() + == other.getSampleRateHertz()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + AUDIO_ENCODING_FIELD_NUMBER; + hash = (53 * hash) + audioEncoding_; + hash = (37 * hash) + SPEAKING_RATE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getSpeakingRate())); + hash = (37 * hash) + PITCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getPitch())); + hash = (37 * hash) + VOLUME_GAIN_DB_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + 
java.lang.Double.doubleToLongBits(getVolumeGainDb())); + hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER; + hash = (53 * hash) + getSampleRateHertz(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.AudioConfig parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.AudioConfig parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder 
newBuilder(com.google.cloud.texttospeech.v1.AudioConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Description of audio data to be synthesized.
+   * 
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.AudioConfig} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.texttospeech.v1.AudioConfig) + com.google.cloud.texttospeech.v1.AudioConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_AudioConfig_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_AudioConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.AudioConfig.class, com.google.cloud.texttospeech.v1.AudioConfig.Builder.class); + } + + // Construct using com.google.cloud.texttospeech.v1.AudioConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + audioEncoding_ = 0; + + speakingRate_ = 0D; + + pitch_ = 0D; + + volumeGainDb_ = 0D; + + sampleRateHertz_ = 0; + + return this; + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_AudioConfig_descriptor; + } + + public com.google.cloud.texttospeech.v1.AudioConfig getDefaultInstanceForType() { + return com.google.cloud.texttospeech.v1.AudioConfig.getDefaultInstance(); + } + + public com.google.cloud.texttospeech.v1.AudioConfig build() { + com.google.cloud.texttospeech.v1.AudioConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.google.cloud.texttospeech.v1.AudioConfig buildPartial() { + com.google.cloud.texttospeech.v1.AudioConfig result = new com.google.cloud.texttospeech.v1.AudioConfig(this); + result.audioEncoding_ = audioEncoding_; + result.speakingRate_ = speakingRate_; + result.pitch_ = pitch_; + result.volumeGainDb_ = volumeGainDb_; + result.sampleRateHertz_ = sampleRateHertz_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
com.google.cloud.texttospeech.v1.AudioConfig) { + return mergeFrom((com.google.cloud.texttospeech.v1.AudioConfig)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.texttospeech.v1.AudioConfig other) { + if (other == com.google.cloud.texttospeech.v1.AudioConfig.getDefaultInstance()) return this; + if (other.audioEncoding_ != 0) { + setAudioEncodingValue(other.getAudioEncodingValue()); + } + if (other.getSpeakingRate() != 0D) { + setSpeakingRate(other.getSpeakingRate()); + } + if (other.getPitch() != 0D) { + setPitch(other.getPitch()); + } + if (other.getVolumeGainDb() != 0D) { + setVolumeGainDb(other.getVolumeGainDb()); + } + if (other.getSampleRateHertz() != 0) { + setSampleRateHertz(other.getSampleRateHertz()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.texttospeech.v1.AudioConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.texttospeech.v1.AudioConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int audioEncoding_ = 0; + /** + *
+     * Required. The format of the requested audio byte stream.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1; + */ + public int getAudioEncodingValue() { + return audioEncoding_; + } + /** + *
+     * Required. The format of the requested audio byte stream.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1; + */ + public Builder setAudioEncodingValue(int value) { + audioEncoding_ = value; + onChanged(); + return this; + } + /** + *
+     * Required. The format of the requested audio byte stream.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1; + */ + public com.google.cloud.texttospeech.v1.AudioEncoding getAudioEncoding() { + com.google.cloud.texttospeech.v1.AudioEncoding result = com.google.cloud.texttospeech.v1.AudioEncoding.valueOf(audioEncoding_); + return result == null ? com.google.cloud.texttospeech.v1.AudioEncoding.UNRECOGNIZED : result; + } + /** + *
+     * Required. The format of the requested audio byte stream.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1; + */ + public Builder setAudioEncoding(com.google.cloud.texttospeech.v1.AudioEncoding value) { + if (value == null) { + throw new NullPointerException(); + } + + audioEncoding_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
+     * Required. The format of the requested audio byte stream.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1; + */ + public Builder clearAudioEncoding() { + + audioEncoding_ = 0; + onChanged(); + return this; + } + + private double speakingRate_ ; + /** + *
+     * Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
+     * native speed supported by the specific voice. 2.0 is twice as fast, and
+     * 0.5 is half as fast. If unset (0.0), defaults to the native 1.0 speed. Any
+     * other values < 0.25 or > 4.0 will return an error.
+     * 
+ * + * double speaking_rate = 2; + */ + public double getSpeakingRate() { + return speakingRate_; + } + /** + *
+     * Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
+     * native speed supported by the specific voice. 2.0 is twice as fast, and
+     * 0.5 is half as fast. If unset (0.0), defaults to the native 1.0 speed. Any
+     * other values < 0.25 or > 4.0 will return an error.
+     * 
+ * + * double speaking_rate = 2; + */ + public Builder setSpeakingRate(double value) { + + speakingRate_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
+     * native speed supported by the specific voice. 2.0 is twice as fast, and
+     * 0.5 is half as fast. If unset (0.0), defaults to the native 1.0 speed. Any
+     * other values < 0.25 or > 4.0 will return an error.
+     * 
+ * + * double speaking_rate = 2; + */ + public Builder clearSpeakingRate() { + + speakingRate_ = 0D; + onChanged(); + return this; + } + + private double pitch_ ; + /** + *
+     * Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
+     * semitones from the original pitch. -20 means decrease 20 semitones from the
+     * original pitch.
+     * 
+ * + * double pitch = 3; + */ + public double getPitch() { + return pitch_; + } + /** + *
+     * Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
+     * semitones from the original pitch. -20 means decrease 20 semitones from the
+     * original pitch.
+     * 
+ * + * double pitch = 3; + */ + public Builder setPitch(double value) { + + pitch_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
+     * semitones from the original pitch. -20 means decrease 20 semitones from the
+     * original pitch.
+     * 
+ * + * double pitch = 3; + */ + public Builder clearPitch() { + + pitch_ = 0D; + onChanged(); + return this; + } + + private double volumeGainDb_ ; + /** + *
+     * Optional volume gain (in dB) of the normal native volume supported by the
+     * specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
+     * 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
+     * will play at approximately half the amplitude of the normal native signal
+     * amplitude. A value of +6.0 (dB) will play at approximately twice the
+     * amplitude of the normal native signal amplitude. We strongly recommend not
+     * exceeding +10 (dB), as there is usually no effective increase in loudness for
+     * any value greater than that.
+     * 
+ * + * double volume_gain_db = 4; + */ + public double getVolumeGainDb() { + return volumeGainDb_; + } + /** + *
+     * Optional volume gain (in dB) of the normal native volume supported by the
+     * specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
+     * 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
+     * will play at approximately half the amplitude of the normal native signal
+     * amplitude. A value of +6.0 (dB) will play at approximately twice the
+     * amplitude of the normal native signal amplitude. We strongly recommend not
+     * exceeding +10 (dB), as there is usually no effective increase in loudness for
+     * any value greater than that.
+     * 
+ * + * double volume_gain_db = 4; + */ + public Builder setVolumeGainDb(double value) { + + volumeGainDb_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional volume gain (in dB) of the normal native volume supported by the
+     * specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
+     * 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
+     * will play at approximately half the amplitude of the normal native signal
+     * amplitude. A value of +6.0 (dB) will play at approximately twice the
+     * amplitude of the normal native signal amplitude. We strongly recommend not
+     * exceeding +10 (dB), as there is usually no effective increase in loudness for
+     * any value greater than that.
+     * 
+ * + * double volume_gain_db = 4; + */ + public Builder clearVolumeGainDb() { + + volumeGainDb_ = 0D; + onChanged(); + return this; + } + + private int sampleRateHertz_ ; + /** + *
+     * The synthesis sample rate (in hertz) for this audio. Optional.  If this is
+     * different from the voice's natural sample rate, then the synthesizer will
+     * honor this request by converting to the desired sample rate (which might
+     * result in worse audio quality), unless the specified sample rate is not
+     * supported for the encoding chosen, in which case it will fail the request
+     * and return [google.rpc.Code.INVALID_ARGUMENT][].
+     * 
+ * + * int32 sample_rate_hertz = 5; + */ + public int getSampleRateHertz() { + return sampleRateHertz_; + } + /** + *
+     * The synthesis sample rate (in hertz) for this audio. Optional.  If this is
+     * different from the voice's natural sample rate, then the synthesizer will
+     * honor this request by converting to the desired sample rate (which might
+     * result in worse audio quality), unless the specified sample rate is not
+     * supported for the encoding chosen, in which case it will fail the request
+     * and return [google.rpc.Code.INVALID_ARGUMENT][].
+     * 
+ * + * int32 sample_rate_hertz = 5; + */ + public Builder setSampleRateHertz(int value) { + + sampleRateHertz_ = value; + onChanged(); + return this; + } + /** + *
+     * The synthesis sample rate (in hertz) for this audio. Optional.  If this is
+     * different from the voice's natural sample rate, then the synthesizer will
+     * honor this request by converting to the desired sample rate (which might
+     * result in worse audio quality), unless the specified sample rate is not
+     * supported for the encoding chosen, in which case it will fail the request
+     * and return [google.rpc.Code.INVALID_ARGUMENT][].
+     * 
+ * + * int32 sample_rate_hertz = 5; + */ + public Builder clearSampleRateHertz() { + + sampleRateHertz_ = 0; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.AudioConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.AudioConfig) + private static final com.google.cloud.texttospeech.v1.AudioConfig DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.AudioConfig(); + } + + public static com.google.cloud.texttospeech.v1.AudioConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + public AudioConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AudioConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public com.google.cloud.texttospeech.v1.AudioConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/AudioConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/AudioConfigOrBuilder.java new file mode 100644 index 000000000000..af07139da48a --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/AudioConfigOrBuilder.java @@ -0,0 +1,79 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +public interface AudioConfigOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.texttospeech.v1.AudioConfig) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The format of the requested audio byte stream.
+   * 
+ * + * .google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1; + */ + int getAudioEncodingValue(); + /** + *
+   * Required. The format of the requested audio byte stream.
+   * 
+ * + * .google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1; + */ + com.google.cloud.texttospeech.v1.AudioEncoding getAudioEncoding(); + + /** + *
+   * Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
+   * native speed supported by the specific voice. 2.0 is twice as fast, and
+   * 0.5 is half as fast. If unset (0.0), defaults to the native 1.0 speed. Any
+   * other values < 0.25 or > 4.0 will return an error.
+   * 
+ * + * double speaking_rate = 2; + */ + double getSpeakingRate(); + + /** + *
+   * Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
+   * semitones from the original pitch. -20 means decrease 20 semitones from the
+   * original pitch.
+   * 
+ * + * double pitch = 3; + */ + double getPitch(); + + /** + *
+   * Optional volume gain (in dB) of the normal native volume supported by the
+   * specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
+   * 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
+   * will play at approximately half the amplitude of the normal native signal
+   * amplitude. A value of +6.0 (dB) will play at approximately twice the
+   * amplitude of the normal native signal amplitude. We strongly recommend not
+   * exceeding +10 (dB), as there is usually no effective increase in loudness for
+   * any value greater than that.
+   * 
+ * + * double volume_gain_db = 4; + */ + double getVolumeGainDb(); + + /** + *
+   * The synthesis sample rate (in hertz) for this audio. Optional.  If this is
+   * different from the voice's natural sample rate, then the synthesizer will
+   * honor this request by converting to the desired sample rate (which might
+   * result in worse audio quality), unless the specified sample rate is not
+   * supported for the encoding chosen, in which case it will fail the request
+   * and return [google.rpc.Code.INVALID_ARGUMENT][].
+   * 
+ * + * int32 sample_rate_hertz = 5; + */ + int getSampleRateHertz(); +} diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/AudioEncoding.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/AudioEncoding.java new file mode 100644 index 000000000000..adbeb00b61bf --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/AudioEncoding.java @@ -0,0 +1,166 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +/** + *
+ * Configuration for setting up the audio encoder. The encoding determines
+ * the output audio format of the synthesized audio.
+ * 
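+ * For illustration, an encoding can be selected by constant or recovered from its wire
+ * value (a sketch, not generated code):
+ * 
+ *   AudioEncoding requested = AudioEncoding.OGG_OPUS;
+ *   AudioEncoding fromWire = AudioEncoding.forNumber(AudioEncoding.LINEAR16_VALUE);  // LINEAR16
+ * 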
+ * + * Protobuf enum {@code google.cloud.texttospeech.v1.AudioEncoding} + */ +public enum AudioEncoding + implements com.google.protobuf.ProtocolMessageEnum { + /** + *
+   * Not specified. Requests with this encoding will return [google.rpc.Code.INVALID_ARGUMENT][].
+   * 
+ * + * AUDIO_ENCODING_UNSPECIFIED = 0; + */ + AUDIO_ENCODING_UNSPECIFIED(0), + /** + *
+   * Uncompressed 16-bit signed little-endian samples (Linear PCM).
+   * Audio content returned as LINEAR16 also contains a WAV header.
+   * 
+ * + * LINEAR16 = 1; + */ + LINEAR16(1), + /** + *
+   * MP3 audio.
+   * 
+ * + * MP3 = 2; + */ + MP3(2), + /** + *
+   * Opus-encoded audio wrapped in an Ogg container. The result will be a
+   * file that can be played natively on Android and in browsers (at least
+   * Chrome and Firefox). The quality of the encoding is considerably higher
+   * than MP3 while using approximately the same bitrate.
+   * 
+ * + * OGG_OPUS = 3; + */ + OGG_OPUS(3), + UNRECOGNIZED(-1), + ; + + /** + *
+   * Not specified. Requests with this encoding will return [google.rpc.Code.INVALID_ARGUMENT][].
+   * 
+ * + * AUDIO_ENCODING_UNSPECIFIED = 0; + */ + public static final int AUDIO_ENCODING_UNSPECIFIED_VALUE = 0; + /** + *
+   * Uncompressed 16-bit signed little-endian samples (Linear PCM).
+   * Audio content returned as LINEAR16 also contains a WAV header.
+   * 
+ * + * LINEAR16 = 1; + */ + public static final int LINEAR16_VALUE = 1; + /** + *
+   * MP3 audio.
+   * 
+ * + * MP3 = 2; + */ + public static final int MP3_VALUE = 2; + /** + *
+   * Opus-encoded audio wrapped in an Ogg container. The result will be a
+   * file that can be played natively on Android and in browsers (at least
+   * Chrome and Firefox). The quality of the encoding is considerably higher
+   * than MP3 while using approximately the same bitrate.
+   * 
+ * + * OGG_OPUS = 3; + */ + public static final int OGG_OPUS_VALUE = 3; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static AudioEncoding valueOf(int value) { + return forNumber(value); + } + + public static AudioEncoding forNumber(int value) { + switch (value) { + case 0: return AUDIO_ENCODING_UNSPECIFIED; + case 1: return LINEAR16; + case 2: return MP3; + case 3: return OGG_OPUS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + AudioEncoding> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public AudioEncoding findValueByNumber(int number) { + return AudioEncoding.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.getDescriptor().getEnumTypes().get(1); + } + + private static final AudioEncoding[] VALUES = values(); + + public static AudioEncoding valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private AudioEncoding(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.texttospeech.v1.AudioEncoding) +} + diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequest.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequest.java new file mode 100644 index 000000000000..c76ded93fa8f --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequest.java @@ -0,0 +1,601 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +/** + *
+ * The top-level message sent by the client for the `ListVoices` method.
+ * 
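+ * A hypothetical request sketch ("en-US" is only an example tag; setLanguageCode is the
+ * generated setter for the language_code field):
+ * 
+ *   ListVoicesRequest request = ListVoicesRequest.newBuilder()
+ *       .setLanguageCode("en-US")   // optional filter; omit to list all voices
+ *       .build();
+ * 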
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.ListVoicesRequest} + */ +public final class ListVoicesRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.ListVoicesRequest) + ListVoicesRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use ListVoicesRequest.newBuilder() to construct. + private ListVoicesRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ListVoicesRequest() { + languageCode_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListVoicesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + languageCode_ = s; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.ListVoicesRequest.class, com.google.cloud.texttospeech.v1.ListVoicesRequest.Builder.class); + } + + public static final int LANGUAGE_CODE_FIELD_NUMBER = 1; + private volatile java.lang.Object languageCode_; + /** + *
+   * Optional (but recommended)
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+   * specified, the ListVoices call will only return voices that can be used to
+   * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+   * supported "en-*" voices; when specifying "no", you will get supported
+   * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+   * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+   * supported "yue-*" voices.
+   * 
+ * + * string language_code = 1; + */ + public java.lang.String getLanguageCode() { + java.lang.Object ref = languageCode_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + languageCode_ = s; + return s; + } + } + /** + *
+   * Optional (but recommended)
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+   * specified, the ListVoices call will only return voices that can be used to
+   * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+   * supported "en-*" voices; when specifying "no", you will get supported
+   * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+   * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+   * supported "yue-*" voices.
+   * 
+ * + * string language_code = 1; + */ + public com.google.protobuf.ByteString + getLanguageCodeBytes() { + java.lang.Object ref = languageCode_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + languageCode_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getLanguageCodeBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageCode_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getLanguageCodeBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, languageCode_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.texttospeech.v1.ListVoicesRequest)) { + return super.equals(obj); + } + com.google.cloud.texttospeech.v1.ListVoicesRequest other = (com.google.cloud.texttospeech.v1.ListVoicesRequest) obj; + + boolean result = true; + result = result && getLanguageCode() + .equals(other.getLanguageCode()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER; + hash = (53 * hash) + getLanguageCode().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.texttospeech.v1.ListVoicesRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * The top-level message sent by the client for the `ListVoices` method.
+   * 
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.ListVoicesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.texttospeech.v1.ListVoicesRequest) + com.google.cloud.texttospeech.v1.ListVoicesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.ListVoicesRequest.class, com.google.cloud.texttospeech.v1.ListVoicesRequest.Builder.class); + } + + // Construct using com.google.cloud.texttospeech.v1.ListVoicesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + languageCode_ = ""; + + return this; + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_descriptor; + } + + public com.google.cloud.texttospeech.v1.ListVoicesRequest getDefaultInstanceForType() { + return com.google.cloud.texttospeech.v1.ListVoicesRequest.getDefaultInstance(); + } + + public com.google.cloud.texttospeech.v1.ListVoicesRequest build() { + com.google.cloud.texttospeech.v1.ListVoicesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.google.cloud.texttospeech.v1.ListVoicesRequest buildPartial() { + com.google.cloud.texttospeech.v1.ListVoicesRequest result = new com.google.cloud.texttospeech.v1.ListVoicesRequest(this); + result.languageCode_ = languageCode_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.texttospeech.v1.ListVoicesRequest) { + return mergeFrom((com.google.cloud.texttospeech.v1.ListVoicesRequest)other); + } else { + 
super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.texttospeech.v1.ListVoicesRequest other) { + if (other == com.google.cloud.texttospeech.v1.ListVoicesRequest.getDefaultInstance()) return this; + if (!other.getLanguageCode().isEmpty()) { + languageCode_ = other.languageCode_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.texttospeech.v1.ListVoicesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.texttospeech.v1.ListVoicesRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object languageCode_ = ""; + /** + *
+     * Optional (but recommended)
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+     * specified, the ListVoices call will only return voices that can be used to
+     * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+     * supported "en-*" voices; when specifying "no", you will get supported
+     * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+     * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+     * supported "yue-*" voices.
+     * 
+ * + * string language_code = 1; + */ + public java.lang.String getLanguageCode() { + java.lang.Object ref = languageCode_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + languageCode_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * Optional (but recommended)
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+     * specified, the ListVoices call will only return voices that can be used to
+     * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+     * supported "en-*" voices; when specifying "no", you will get supported
+     * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+     * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+     * supported "yue-*" voices.
+     * 
+ * + * string language_code = 1; + */ + public com.google.protobuf.ByteString + getLanguageCodeBytes() { + java.lang.Object ref = languageCode_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + languageCode_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * Optional (but recommended)
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+     * specified, the ListVoices call will only return voices that can be used to
+     * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+     * supported "en-*" voices; when specifying "no", you will get supported
+     * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+     * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+     * supported "yue-*" voices.
+     * 
+ * + * string language_code = 1; + */ + public Builder setLanguageCode( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + languageCode_ = value; + onChanged(); + return this; + } + /** + *
+     * Optional (but recommended)
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+     * specified, the ListVoices call will only return voices that can be used to
+     * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+     * supported "en-*" voices; when specifying "no", you will get supported
+     * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+     * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+     * supported "yue-*" voices.
+     * 
+ * + * string language_code = 1; + */ + public Builder clearLanguageCode() { + + languageCode_ = getDefaultInstance().getLanguageCode(); + onChanged(); + return this; + } + /** + *
+     * Optional (but recommended)
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+     * specified, the ListVoices call will only return voices that can be used to
+     * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+     * supported "en-*" voices; when specifying "no", you will get supported
+     * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+     * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+     * supported "yue-*" voices.
+     * 
+ * + * string language_code = 1; + */ + public Builder setLanguageCodeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + languageCode_ = value; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.ListVoicesRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.ListVoicesRequest) + private static final com.google.cloud.texttospeech.v1.ListVoicesRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.ListVoicesRequest(); + } + + public static com.google.cloud.texttospeech.v1.ListVoicesRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + public ListVoicesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListVoicesRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public com.google.cloud.texttospeech.v1.ListVoicesRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequestOrBuilder.java new file mode 100644 index 000000000000..be63654600cd --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequestOrBuilder.java @@ -0,0 +1,41 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +public interface ListVoicesRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.texttospeech.v1.ListVoicesRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Optional (but recommended)
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+   * specified, the ListVoices call will only return voices that can be used to
+   * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+   * supported "en-*" voices; when specifying "no", you will get supported
+   * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+   * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+   * supported "yue-*" voices.
+   * 
+ * + * string language_code = 1; + */ + java.lang.String getLanguageCode(); + /** + *
+   * Optional (but recommended)
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+   * specified, the ListVoices call will only return voices that can be used to
+   * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+   * supported "en-*" voices; when specifying "no", you will get supported
+   * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+   * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+   * supported "yue-*" voices.
+   * 
+ * + * string language_code = 1; + */ + com.google.protobuf.ByteString + getLanguageCodeBytes(); +} diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesResponse.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesResponse.java new file mode 100644 index 000000000000..cecca6db7d9b --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesResponse.java @@ -0,0 +1,834 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +/** + *
+ * The message returned to the client by the `ListVoices` method.
+ * 
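+ * Illustrative usage (editor's note, not part of the generated source): the
+ * repeated field is read through the generated accessors below; the Voice
+ * accessors named in the loop body are assumed from the published API and do
+ * not appear in this diff.
+ *
+ *     ListVoicesResponse response = ...; // returned by the ListVoices RPC
+ *     for (Voice voice : response.getVoicesList()) {
+ *       // e.g. voice.getName(), voice.getSsmlGender() -- assumed accessors
+ *     }
+ *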
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.ListVoicesResponse} + */ +public final class ListVoicesResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.ListVoicesResponse) + ListVoicesResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use ListVoicesResponse.newBuilder() to construct. + private ListVoicesResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ListVoicesResponse() { + voices_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListVoicesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + voices_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + voices_.add( + input.readMessage(com.google.cloud.texttospeech.v1.Voice.parser(), extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + voices_ = java.util.Collections.unmodifiableList(voices_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.ListVoicesResponse.class, com.google.cloud.texttospeech.v1.ListVoicesResponse.Builder.class); + } + + public static final int VOICES_FIELD_NUMBER = 1; + private java.util.List voices_; + /** + *
+   * The list of voices.
+   * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public java.util.List getVoicesList() { + return voices_; + } + /** + *
+   * The list of voices.
+   * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public java.util.List + getVoicesOrBuilderList() { + return voices_; + } + /** + *
+   * The list of voices.
+   * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public int getVoicesCount() { + return voices_.size(); + } + /** + *
+   * The list of voices.
+   * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public com.google.cloud.texttospeech.v1.Voice getVoices(int index) { + return voices_.get(index); + } + /** + *
+   * The list of voices.
+   * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public com.google.cloud.texttospeech.v1.VoiceOrBuilder getVoicesOrBuilder( + int index) { + return voices_.get(index); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < voices_.size(); i++) { + output.writeMessage(1, voices_.get(i)); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < voices_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, voices_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.texttospeech.v1.ListVoicesResponse)) { + return super.equals(obj); + } + com.google.cloud.texttospeech.v1.ListVoicesResponse other = (com.google.cloud.texttospeech.v1.ListVoicesResponse) obj; + + boolean result = true; + result = result && getVoicesList() + .equals(other.getVoicesList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getVoicesCount() > 0) { + hash = (37 * hash) + VOICES_FIELD_NUMBER; + hash = (53 * hash) + getVoicesList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom(java.io.InputStream input) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.texttospeech.v1.ListVoicesResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * The message returned to the client by the `ListVoices` method.
+   * 
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.ListVoicesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.texttospeech.v1.ListVoicesResponse) + com.google.cloud.texttospeech.v1.ListVoicesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.ListVoicesResponse.class, com.google.cloud.texttospeech.v1.ListVoicesResponse.Builder.class); + } + + // Construct using com.google.cloud.texttospeech.v1.ListVoicesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getVoicesFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (voicesBuilder_ == null) { + voices_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + voicesBuilder_.clear(); + } + return this; + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_descriptor; + } + + public com.google.cloud.texttospeech.v1.ListVoicesResponse getDefaultInstanceForType() { + return com.google.cloud.texttospeech.v1.ListVoicesResponse.getDefaultInstance(); + } + + public com.google.cloud.texttospeech.v1.ListVoicesResponse build() { + com.google.cloud.texttospeech.v1.ListVoicesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.google.cloud.texttospeech.v1.ListVoicesResponse buildPartial() { + com.google.cloud.texttospeech.v1.ListVoicesResponse result = new com.google.cloud.texttospeech.v1.ListVoicesResponse(this); + int from_bitField0_ = bitField0_; + if (voicesBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + voices_ = java.util.Collections.unmodifiableList(voices_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.voices_ = voices_; + } else { + result.voices_ = voicesBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) 
super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.texttospeech.v1.ListVoicesResponse) { + return mergeFrom((com.google.cloud.texttospeech.v1.ListVoicesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.texttospeech.v1.ListVoicesResponse other) { + if (other == com.google.cloud.texttospeech.v1.ListVoicesResponse.getDefaultInstance()) return this; + if (voicesBuilder_ == null) { + if (!other.voices_.isEmpty()) { + if (voices_.isEmpty()) { + voices_ = other.voices_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureVoicesIsMutable(); + voices_.addAll(other.voices_); + } + onChanged(); + } + } else { + if (!other.voices_.isEmpty()) { + if (voicesBuilder_.isEmpty()) { + voicesBuilder_.dispose(); + voicesBuilder_ = null; + voices_ = other.voices_; + bitField0_ = (bitField0_ & ~0x00000001); + voicesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getVoicesFieldBuilder() : null; + } else { + voicesBuilder_.addAllMessages(other.voices_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.texttospeech.v1.ListVoicesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.texttospeech.v1.ListVoicesResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List voices_ = + java.util.Collections.emptyList(); + private void ensureVoicesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + voices_ = new java.util.ArrayList(voices_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.texttospeech.v1.Voice, com.google.cloud.texttospeech.v1.Voice.Builder, com.google.cloud.texttospeech.v1.VoiceOrBuilder> voicesBuilder_; + + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public java.util.List getVoicesList() { + if (voicesBuilder_ == null) { + return java.util.Collections.unmodifiableList(voices_); + } else { + return voicesBuilder_.getMessageList(); + } + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public int getVoicesCount() { + if (voicesBuilder_ == null) { + return voices_.size(); + } else { + return voicesBuilder_.getCount(); + } + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public com.google.cloud.texttospeech.v1.Voice getVoices(int index) { + if (voicesBuilder_ == null) { + return voices_.get(index); + } else { + return voicesBuilder_.getMessage(index); + } + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public Builder setVoices( + int index, com.google.cloud.texttospeech.v1.Voice value) { + if (voicesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureVoicesIsMutable(); + voices_.set(index, value); + onChanged(); + } else { + voicesBuilder_.setMessage(index, value); + } + return this; + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public Builder setVoices( + int index, com.google.cloud.texttospeech.v1.Voice.Builder builderForValue) { + if (voicesBuilder_ == null) { + ensureVoicesIsMutable(); + voices_.set(index, builderForValue.build()); + onChanged(); + } else { + voicesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public Builder addVoices(com.google.cloud.texttospeech.v1.Voice value) { + if (voicesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureVoicesIsMutable(); + voices_.add(value); + onChanged(); + } else { + voicesBuilder_.addMessage(value); + } + return this; + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public Builder addVoices( + int index, com.google.cloud.texttospeech.v1.Voice value) { + if (voicesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureVoicesIsMutable(); + voices_.add(index, value); + onChanged(); + } else { + voicesBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public Builder addVoices( + com.google.cloud.texttospeech.v1.Voice.Builder builderForValue) { + if (voicesBuilder_ == null) { + ensureVoicesIsMutable(); + voices_.add(builderForValue.build()); + onChanged(); + } else { + voicesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public Builder addVoices( + int index, com.google.cloud.texttospeech.v1.Voice.Builder builderForValue) { + if (voicesBuilder_ == null) { + ensureVoicesIsMutable(); + voices_.add(index, builderForValue.build()); + onChanged(); + } else { + voicesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+     * The list of voices.
+     * 
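+     * Illustrative usage (editor's note, not part of the generated source): a
+     * server-side ListVoices implementation could populate the repeated field
+     * with addVoices/addAllVoices; Voice.newBuilder() is assumed from the
+     * published API and does not appear in this diff.
+     *
+     *     ListVoicesResponse response = ListVoicesResponse.newBuilder()
+     *         .addVoices(Voice.newBuilder().build())
+     *         .build();
+     *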
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public Builder addAllVoices( + java.lang.Iterable values) { + if (voicesBuilder_ == null) { + ensureVoicesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, voices_); + onChanged(); + } else { + voicesBuilder_.addAllMessages(values); + } + return this; + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public Builder clearVoices() { + if (voicesBuilder_ == null) { + voices_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + voicesBuilder_.clear(); + } + return this; + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public Builder removeVoices(int index) { + if (voicesBuilder_ == null) { + ensureVoicesIsMutable(); + voices_.remove(index); + onChanged(); + } else { + voicesBuilder_.remove(index); + } + return this; + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public com.google.cloud.texttospeech.v1.Voice.Builder getVoicesBuilder( + int index) { + return getVoicesFieldBuilder().getBuilder(index); + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public com.google.cloud.texttospeech.v1.VoiceOrBuilder getVoicesOrBuilder( + int index) { + if (voicesBuilder_ == null) { + return voices_.get(index); } else { + return voicesBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public java.util.List + getVoicesOrBuilderList() { + if (voicesBuilder_ != null) { + return voicesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(voices_); + } + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public com.google.cloud.texttospeech.v1.Voice.Builder addVoicesBuilder() { + return getVoicesFieldBuilder().addBuilder( + com.google.cloud.texttospeech.v1.Voice.getDefaultInstance()); + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public com.google.cloud.texttospeech.v1.Voice.Builder addVoicesBuilder( + int index) { + return getVoicesFieldBuilder().addBuilder( + index, com.google.cloud.texttospeech.v1.Voice.getDefaultInstance()); + } + /** + *
+     * The list of voices.
+     * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + public java.util.List + getVoicesBuilderList() { + return getVoicesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.texttospeech.v1.Voice, com.google.cloud.texttospeech.v1.Voice.Builder, com.google.cloud.texttospeech.v1.VoiceOrBuilder> + getVoicesFieldBuilder() { + if (voicesBuilder_ == null) { + voicesBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.texttospeech.v1.Voice, com.google.cloud.texttospeech.v1.Voice.Builder, com.google.cloud.texttospeech.v1.VoiceOrBuilder>( + voices_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + voices_ = null; + } + return voicesBuilder_; + } + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.ListVoicesResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.ListVoicesResponse) + private static final com.google.cloud.texttospeech.v1.ListVoicesResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.ListVoicesResponse(); + } + + public static com.google.cloud.texttospeech.v1.ListVoicesResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + public ListVoicesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListVoicesResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public com.google.cloud.texttospeech.v1.ListVoicesResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesResponseOrBuilder.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesResponseOrBuilder.java new file mode 100644 index 000000000000..253480e24e01 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesResponseOrBuilder.java @@ -0,0 +1,53 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +public interface ListVoicesResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.texttospeech.v1.ListVoicesResponse) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * The list of voices.
+   * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + java.util.List + getVoicesList(); + /** + *
+   * The list of voices.
+   * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + com.google.cloud.texttospeech.v1.Voice getVoices(int index); + /** + *
+   * The list of voices.
+   * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + int getVoicesCount(); + /** + *
+   * The list of voices.
+   * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + java.util.List + getVoicesOrBuilderList(); + /** + *
+   * The list of voices.
+   * 
+ * + * repeated .google.cloud.texttospeech.v1.Voice voices = 1; + */ + com.google.cloud.texttospeech.v1.VoiceOrBuilder getVoicesOrBuilder( + int index); +} diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SsmlVoiceGender.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SsmlVoiceGender.java new file mode 100644 index 000000000000..12481e55c8b0 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SsmlVoiceGender.java @@ -0,0 +1,166 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +/** + *
+ * Gender of the voice as described in
+ * [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice).
+ * 
+ * + * Protobuf enum {@code google.cloud.texttospeech.v1.SsmlVoiceGender} + */ +public enum SsmlVoiceGender + implements com.google.protobuf.ProtocolMessageEnum { + /** + *
+   * An unspecified gender.
+   * In VoiceSelectionParams, this means that the client doesn't care which
+   * gender the selected voice will have. In the Voice field of
+   * ListVoicesResponse, this may mean that the voice doesn't fit any of the
+   * other categories in this enum, or that the gender of the voice isn't known.
+   * 
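+   * Illustrative usage (editor's note, not part of the generated source):
+   * values can be compared directly, e.g. to treat an unspecified gender on a
+   * returned voice as "no information" rather than as a concrete gender.
+   *
+   *     SsmlVoiceGender gender = SsmlVoiceGender.forNumber(0);
+   *     boolean known = gender != SsmlVoiceGender.SSML_VOICE_GENDER_UNSPECIFIED
+   *         && gender != SsmlVoiceGender.UNRECOGNIZED;
+   *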
+ * + * SSML_VOICE_GENDER_UNSPECIFIED = 0; + */ + SSML_VOICE_GENDER_UNSPECIFIED(0), + /** + *
+   * A male voice.
+   * 
+ * + * MALE = 1; + */ + MALE(1), + /** + *
+   * A female voice.
+   * 
+ * + * FEMALE = 2; + */ + FEMALE(2), + /** + *
+   * A gender-neutral voice.
+   * 
+ * + * NEUTRAL = 3; + */ + NEUTRAL(3), + UNRECOGNIZED(-1), + ; + + /** + *
+   * An unspecified gender.
+   * In VoiceSelectionParams, this means that the client doesn't care which
+   * gender the selected voice will have. In the Voice field of
+   * ListVoicesResponse, this may mean that the voice doesn't fit any of the
+   * other categories in this enum, or that the gender of the voice isn't known.
+   * 
+ * + * SSML_VOICE_GENDER_UNSPECIFIED = 0; + */ + public static final int SSML_VOICE_GENDER_UNSPECIFIED_VALUE = 0; + /** + *
+   * A male voice.
+   * 
+ * + * MALE = 1; + */ + public static final int MALE_VALUE = 1; + /** + *
+   * A female voice.
+   * 
+ * + * FEMALE = 2; + */ + public static final int FEMALE_VALUE = 2; + /** + *
+   * A gender-neutral voice.
+   * 
+ * + * NEUTRAL = 3; + */ + public static final int NEUTRAL_VALUE = 3; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static SsmlVoiceGender valueOf(int value) { + return forNumber(value); + } + + public static SsmlVoiceGender forNumber(int value) { + switch (value) { + case 0: return SSML_VOICE_GENDER_UNSPECIFIED; + case 1: return MALE; + case 2: return FEMALE; + case 3: return NEUTRAL; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + SsmlVoiceGender> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public SsmlVoiceGender findValueByNumber(int number) { + return SsmlVoiceGender.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.getDescriptor().getEnumTypes().get(0); + } + + private static final SsmlVoiceGender[] VALUES = values(); + + public static SsmlVoiceGender valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private SsmlVoiceGender(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.texttospeech.v1.SsmlVoiceGender) +} + diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesisInput.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesisInput.java new file mode 100644 index 000000000000..7096500138b4 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesisInput.java @@ -0,0 +1,856 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +/** + *
+ * Contains text input to be synthesized. Either `text` or `ssml` must be
+ * supplied. Supplying both or neither returns
+ * [google.rpc.Code.INVALID_ARGUMENT][]. The input size is limited to 5000
+ * characters.
+ * 
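+ * Illustrative usage (editor's note, not part of the generated source): the
+ * two fields form a oneof, so setting one clears the other. The builder API
+ * (newBuilder, setText, setSsml) follows the usual generated pattern and is
+ * assumed here; it is truncated in this diff.
+ *
+ *     SynthesisInput input = SynthesisInput.newBuilder()
+ *         .setText("Hello, world")   // or: .setSsml("<speak>Hello</speak>")
+ *         .build();
+ *     // input.getInputSourceCase() == InputSourceCase.TEXT
+ *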
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.SynthesisInput} + */ +public final class SynthesisInput extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.SynthesisInput) + SynthesisInputOrBuilder { +private static final long serialVersionUID = 0L; + // Use SynthesisInput.newBuilder() to construct. + private SynthesisInput(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SynthesisInput() { + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SynthesisInput( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + inputSourceCase_ = 1; + inputSource_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + inputSourceCase_ = 2; + inputSource_ = s; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesisInput_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesisInput_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.SynthesisInput.class, com.google.cloud.texttospeech.v1.SynthesisInput.Builder.class); + } + + private int inputSourceCase_ = 0; + private java.lang.Object inputSource_; + public enum InputSourceCase + implements com.google.protobuf.Internal.EnumLite { + TEXT(1), + SSML(2), + INPUTSOURCE_NOT_SET(0); + private final int value; + private InputSourceCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static InputSourceCase valueOf(int value) { + return forNumber(value); + } + + public static InputSourceCase forNumber(int value) { + switch (value) { + case 1: return TEXT; + case 2: return SSML; + case 0: return INPUTSOURCE_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public InputSourceCase + getInputSourceCase() { + return InputSourceCase.forNumber( + inputSourceCase_); + } + + public static final int TEXT_FIELD_NUMBER = 1; + /** + *
+   * The raw text to be synthesized.
+   * 
+ * + * string text = 1; + */ + public java.lang.String getText() { + java.lang.Object ref = ""; + if (inputSourceCase_ == 1) { + ref = inputSource_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (inputSourceCase_ == 1) { + inputSource_ = s; + } + return s; + } + } + /** + *
+   * The raw text to be synthesized.
+   * 
+ * + * string text = 1; + */ + public com.google.protobuf.ByteString + getTextBytes() { + java.lang.Object ref = ""; + if (inputSourceCase_ == 1) { + ref = inputSource_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (inputSourceCase_ == 1) { + inputSource_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SSML_FIELD_NUMBER = 2; + /** + *
+   * The SSML document to be synthesized. The SSML document must be valid
+   * and well-formed. Otherwise the RPC will fail and return
+   * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+   * [SSML](/speech/text-to-speech/docs/ssml).
+   * 
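+   * Illustrative example (editor's note, not part of the generated source): a
+   * minimal well-formed SSML document is a single <speak> element, e.g.
+   *
+   *     String ssml =
+   *         "<speak>Hello <break time=\"300ms\"/> world</speak>";
+   *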
+ * + * string ssml = 2; + */ + public java.lang.String getSsml() { + java.lang.Object ref = ""; + if (inputSourceCase_ == 2) { + ref = inputSource_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (inputSourceCase_ == 2) { + inputSource_ = s; + } + return s; + } + } + /** + *
+   * The SSML document to be synthesized. The SSML document must be valid
+   * and well-formed. Otherwise the RPC will fail and return
+   * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+   * [SSML](/speech/text-to-speech/docs/ssml).
+   * 
+ * + * string ssml = 2; + */ + public com.google.protobuf.ByteString + getSsmlBytes() { + java.lang.Object ref = ""; + if (inputSourceCase_ == 2) { + ref = inputSource_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (inputSourceCase_ == 2) { + inputSource_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (inputSourceCase_ == 1) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, inputSource_); + } + if (inputSourceCase_ == 2) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, inputSource_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (inputSourceCase_ == 1) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, inputSource_); + } + if (inputSourceCase_ == 2) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, inputSource_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.texttospeech.v1.SynthesisInput)) { + return super.equals(obj); + } + com.google.cloud.texttospeech.v1.SynthesisInput other = (com.google.cloud.texttospeech.v1.SynthesisInput) obj; + + boolean result = true; + result = result && getInputSourceCase().equals( + other.getInputSourceCase()); + if (!result) return false; + switch (inputSourceCase_) { + case 1: + result = result && getText() + .equals(other.getText()); + break; + case 2: + result = result && getSsml() + .equals(other.getSsml()); + break; + case 0: + default: + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (inputSourceCase_) { + case 1: + hash = (37 * hash) + TEXT_FIELD_NUMBER; + hash = (53 * hash) + getText().hashCode(); + break; + case 2: + hash = (37 * hash) + SSML_FIELD_NUMBER; + hash = (53 * hash) + getSsml().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
com.google.cloud.texttospeech.v1.SynthesisInput parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesisInput parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.SynthesisInput parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.texttospeech.v1.SynthesisInput prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Contains text input to be synthesized. Either `text` or `ssml` must be
+   * supplied. Supplying both or neither returns
+   * [google.rpc.Code.INVALID_ARGUMENT][]. The input size is limited to 5000
+   * characters.
+   * 
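+   *
+   * <p>A minimal usage sketch (illustrative only), supplying the `text` variant:
+   * <pre>{@code
+   * SynthesisInput input = SynthesisInput.newBuilder()
+   *     .setText("Hello, world!")
+   *     .build();
+   * }</pre>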
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.SynthesisInput} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.texttospeech.v1.SynthesisInput) + com.google.cloud.texttospeech.v1.SynthesisInputOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesisInput_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesisInput_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.SynthesisInput.class, com.google.cloud.texttospeech.v1.SynthesisInput.Builder.class); + } + + // Construct using com.google.cloud.texttospeech.v1.SynthesisInput.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + inputSourceCase_ = 0; + inputSource_ = null; + return this; + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesisInput_descriptor; + } + + public com.google.cloud.texttospeech.v1.SynthesisInput getDefaultInstanceForType() { + return com.google.cloud.texttospeech.v1.SynthesisInput.getDefaultInstance(); + } + + public com.google.cloud.texttospeech.v1.SynthesisInput build() { + com.google.cloud.texttospeech.v1.SynthesisInput result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.google.cloud.texttospeech.v1.SynthesisInput buildPartial() { + com.google.cloud.texttospeech.v1.SynthesisInput result = new com.google.cloud.texttospeech.v1.SynthesisInput(this); + if (inputSourceCase_ == 1) { + result.inputSource_ = inputSource_; + } + if (inputSourceCase_ == 2) { + result.inputSource_ = inputSource_; + } + result.inputSourceCase_ = inputSourceCase_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
com.google.cloud.texttospeech.v1.SynthesisInput) { + return mergeFrom((com.google.cloud.texttospeech.v1.SynthesisInput)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.texttospeech.v1.SynthesisInput other) { + if (other == com.google.cloud.texttospeech.v1.SynthesisInput.getDefaultInstance()) return this; + switch (other.getInputSourceCase()) { + case TEXT: { + inputSourceCase_ = 1; + inputSource_ = other.inputSource_; + onChanged(); + break; + } + case SSML: { + inputSourceCase_ = 2; + inputSource_ = other.inputSource_; + onChanged(); + break; + } + case INPUTSOURCE_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.texttospeech.v1.SynthesisInput parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.texttospeech.v1.SynthesisInput) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int inputSourceCase_ = 0; + private java.lang.Object inputSource_; + public InputSourceCase + getInputSourceCase() { + return InputSourceCase.forNumber( + inputSourceCase_); + } + + public Builder clearInputSource() { + inputSourceCase_ = 0; + inputSource_ = null; + onChanged(); + return this; + } + + + /** + *
+     * The raw text to be synthesized.
+     * 
+ * + * string text = 1; + */ + public java.lang.String getText() { + java.lang.Object ref = ""; + if (inputSourceCase_ == 1) { + ref = inputSource_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (inputSourceCase_ == 1) { + inputSource_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * The raw text to be synthesized.
+     * 
+ * + * string text = 1; + */ + public com.google.protobuf.ByteString + getTextBytes() { + java.lang.Object ref = ""; + if (inputSourceCase_ == 1) { + ref = inputSource_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (inputSourceCase_ == 1) { + inputSource_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * The raw text to be synthesized.
+     * 
+ * + * string text = 1; + */ + public Builder setText( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + inputSourceCase_ = 1; + inputSource_ = value; + onChanged(); + return this; + } + /** + *
+     * The raw text to be synthesized.
+     * 
+ * + * string text = 1; + */ + public Builder clearText() { + if (inputSourceCase_ == 1) { + inputSourceCase_ = 0; + inputSource_ = null; + onChanged(); + } + return this; + } + /** + *
+     * The raw text to be synthesized.
+     * 
+ * + * string text = 1; + */ + public Builder setTextBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + inputSourceCase_ = 1; + inputSource_ = value; + onChanged(); + return this; + } + + /** + *
+     * The SSML document to be synthesized. The SSML document must be valid
+     * and well-formed. Otherwise the RPC will fail and return
+     * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+     * [SSML](/speech/text-to-speech/docs/ssml).
+     * 
+ * + * string ssml = 2; + */ + public java.lang.String getSsml() { + java.lang.Object ref = ""; + if (inputSourceCase_ == 2) { + ref = inputSource_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (inputSourceCase_ == 2) { + inputSource_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * The SSML document to be synthesized. The SSML document must be valid
+     * and well-formed. Otherwise the RPC will fail and return
+     * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+     * [SSML](/speech/text-to-speech/docs/ssml).
+     * 
+ * + * string ssml = 2; + */ + public com.google.protobuf.ByteString + getSsmlBytes() { + java.lang.Object ref = ""; + if (inputSourceCase_ == 2) { + ref = inputSource_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + if (inputSourceCase_ == 2) { + inputSource_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * The SSML document to be synthesized. The SSML document must be valid
+     * and well-formed. Otherwise the RPC will fail and return
+     * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+     * [SSML](/speech/text-to-speech/docs/ssml).
+     * 
+ * + * string ssml = 2; + */ + public Builder setSsml( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + inputSourceCase_ = 2; + inputSource_ = value; + onChanged(); + return this; + } + /** + *
+     * The SSML document to be synthesized. The SSML document must be valid
+     * and well-formed. Otherwise the RPC will fail and return
+     * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+     * [SSML](/speech/text-to-speech/docs/ssml).
+     * 
+ * + * string ssml = 2; + */ + public Builder clearSsml() { + if (inputSourceCase_ == 2) { + inputSourceCase_ = 0; + inputSource_ = null; + onChanged(); + } + return this; + } + /** + *
+     * The SSML document to be synthesized. The SSML document must be valid
+     * and well-formed. Otherwise the RPC will fail and return
+     * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+     * [SSML](/speech/text-to-speech/docs/ssml).
+     * 
+ * + * string ssml = 2; + */ + public Builder setSsmlBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + inputSourceCase_ = 2; + inputSource_ = value; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.SynthesisInput) + } + + // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.SynthesisInput) + private static final com.google.cloud.texttospeech.v1.SynthesisInput DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.SynthesisInput(); + } + + public static com.google.cloud.texttospeech.v1.SynthesisInput getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + public SynthesisInput parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SynthesisInput(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public com.google.cloud.texttospeech.v1.SynthesisInput getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesisInputOrBuilder.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesisInputOrBuilder.java new file mode 100644 index 000000000000..5a4477e12a50 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesisInputOrBuilder.java @@ -0,0 +1,53 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +public interface SynthesisInputOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.texttospeech.v1.SynthesisInput) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * The raw text to be synthesized.
+   * 
+ * + * string text = 1; + */ + java.lang.String getText(); + /** + *
+   * The raw text to be synthesized.
+   * 
+ * + * string text = 1; + */ + com.google.protobuf.ByteString + getTextBytes(); + + /** + *
+   * The SSML document to be synthesized. The SSML document must be valid
+   * and well-formed. Otherwise the RPC will fail and return
+   * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+   * [SSML](/speech/text-to-speech/docs/ssml).
+   * 
+ * + * string ssml = 2; + */ + java.lang.String getSsml(); + /** + *
+   * The SSML document to be synthesized. The SSML document must be valid
+   * and well-formed. Otherwise the RPC will fail and return
+   * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+   * [SSML](/speech/text-to-speech/docs/ssml).
+   * 
+ * + * string ssml = 2; + */ + com.google.protobuf.ByteString + getSsmlBytes(); + + public com.google.cloud.texttospeech.v1.SynthesisInput.InputSourceCase getInputSourceCase(); +} diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechRequest.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechRequest.java new file mode 100644 index 000000000000..bd0b1c6f228d --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechRequest.java @@ -0,0 +1,1084 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +/** + *
+ * The top-level message sent by the client for the `SynthesizeSpeech` method.
+ * 
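+ *
+ * <p>For example, a request could be assembled as follows (illustrative only;
+ * the language code and audio encoding are sample values and rely on the
+ * `VoiceSelectionParams` and `AudioConfig` setters defined elsewhere in this
+ * package):
+ * <pre>{@code
+ * SynthesizeSpeechRequest request = SynthesizeSpeechRequest.newBuilder()
+ *     .setInput(SynthesisInput.newBuilder().setText("Hello, world!"))
+ *     .setVoice(VoiceSelectionParams.newBuilder().setLanguageCode("en-US"))
+ *     .setAudioConfig(AudioConfig.newBuilder().setAudioEncoding(AudioEncoding.MP3))
+ *     .build();
+ * }</pre>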
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.SynthesizeSpeechRequest} + */ +public final class SynthesizeSpeechRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.SynthesizeSpeechRequest) + SynthesizeSpeechRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use SynthesizeSpeechRequest.newBuilder() to construct. + private SynthesizeSpeechRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SynthesizeSpeechRequest() { + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SynthesizeSpeechRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + com.google.cloud.texttospeech.v1.SynthesisInput.Builder subBuilder = null; + if (input_ != null) { + subBuilder = input_.toBuilder(); + } + input_ = input.readMessage(com.google.cloud.texttospeech.v1.SynthesisInput.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(input_); + input_ = subBuilder.buildPartial(); + } + + break; + } + case 18: { + com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder subBuilder = null; + if (voice_ != null) { + subBuilder = voice_.toBuilder(); + } + voice_ = input.readMessage(com.google.cloud.texttospeech.v1.VoiceSelectionParams.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(voice_); + voice_ = subBuilder.buildPartial(); + } + + break; + } + case 26: { + com.google.cloud.texttospeech.v1.AudioConfig.Builder subBuilder = null; + if (audioConfig_ != null) { + subBuilder = audioConfig_.toBuilder(); + } + audioConfig_ = input.readMessage(com.google.cloud.texttospeech.v1.AudioConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(audioConfig_); + audioConfig_ = subBuilder.buildPartial(); + } + + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest.class, 
com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest.Builder.class); + } + + public static final int INPUT_FIELD_NUMBER = 1; + private com.google.cloud.texttospeech.v1.SynthesisInput input_; + /** + *
+   * Required. The Synthesizer requires either plain text or SSML as input.
+   * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + public boolean hasInput() { + return input_ != null; + } + /** + *
+   * Required. The Synthesizer requires either plain text or SSML as input.
+   * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + public com.google.cloud.texttospeech.v1.SynthesisInput getInput() { + return input_ == null ? com.google.cloud.texttospeech.v1.SynthesisInput.getDefaultInstance() : input_; + } + /** + *
+   * Required. The Synthesizer requires either plain text or SSML as input.
+   * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + public com.google.cloud.texttospeech.v1.SynthesisInputOrBuilder getInputOrBuilder() { + return getInput(); + } + + public static final int VOICE_FIELD_NUMBER = 2; + private com.google.cloud.texttospeech.v1.VoiceSelectionParams voice_; + /** + *
+   * Required. The desired voice of the synthesized audio.
+   * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + public boolean hasVoice() { + return voice_ != null; + } + /** + *
+   * Required. The desired voice of the synthesized audio.
+   * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + public com.google.cloud.texttospeech.v1.VoiceSelectionParams getVoice() { + return voice_ == null ? com.google.cloud.texttospeech.v1.VoiceSelectionParams.getDefaultInstance() : voice_; + } + /** + *
+   * Required. The desired voice of the synthesized audio.
+   * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + public com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder getVoiceOrBuilder() { + return getVoice(); + } + + public static final int AUDIO_CONFIG_FIELD_NUMBER = 3; + private com.google.cloud.texttospeech.v1.AudioConfig audioConfig_; + /** + *
+   * Required. The configuration of the synthesized audio.
+   * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + public boolean hasAudioConfig() { + return audioConfig_ != null; + } + /** + *
+   * Required. The configuration of the synthesized audio.
+   * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + public com.google.cloud.texttospeech.v1.AudioConfig getAudioConfig() { + return audioConfig_ == null ? com.google.cloud.texttospeech.v1.AudioConfig.getDefaultInstance() : audioConfig_; + } + /** + *
+   * Required. The configuration of the synthesized audio.
+   * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + public com.google.cloud.texttospeech.v1.AudioConfigOrBuilder getAudioConfigOrBuilder() { + return getAudioConfig(); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (input_ != null) { + output.writeMessage(1, getInput()); + } + if (voice_ != null) { + output.writeMessage(2, getVoice()); + } + if (audioConfig_ != null) { + output.writeMessage(3, getAudioConfig()); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (input_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getInput()); + } + if (voice_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getVoice()); + } + if (audioConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getAudioConfig()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest)) { + return super.equals(obj); + } + com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest other = (com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest) obj; + + boolean result = true; + result = result && (hasInput() == other.hasInput()); + if (hasInput()) { + result = result && getInput() + .equals(other.getInput()); + } + result = result && (hasVoice() == other.hasVoice()); + if (hasVoice()) { + result = result && getVoice() + .equals(other.getVoice()); + } + result = result && (hasAudioConfig() == other.hasAudioConfig()); + if (hasAudioConfig()) { + result = result && getAudioConfig() + .equals(other.getAudioConfig()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasInput()) { + hash = (37 * hash) + INPUT_FIELD_NUMBER; + hash = (53 * hash) + getInput().hashCode(); + } + if (hasVoice()) { + hash = (37 * hash) + VOICE_FIELD_NUMBER; + hash = (53 * hash) + getVoice().hashCode(); + } + if (hasAudioConfig()) { + hash = (37 * hash) + AUDIO_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getAudioConfig().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * The top-level message sent by the client for the `SynthesizeSpeech` method.
+   * 
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.SynthesizeSpeechRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.texttospeech.v1.SynthesizeSpeechRequest) + com.google.cloud.texttospeech.v1.SynthesizeSpeechRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest.class, com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest.Builder.class); + } + + // Construct using com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + if (inputBuilder_ == null) { + input_ = null; + } else { + input_ = null; + inputBuilder_ = null; + } + if (voiceBuilder_ == null) { + voice_ = null; + } else { + voice_ = null; + voiceBuilder_ = null; + } + if (audioConfigBuilder_ == null) { + audioConfig_ = null; + } else { + audioConfig_ = null; + audioConfigBuilder_ = null; + } + return this; + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_descriptor; + } + + public com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest getDefaultInstanceForType() { + return com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest.getDefaultInstance(); + } + + public com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest build() { + com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest buildPartial() { + com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest result = new com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest(this); + if (inputBuilder_ == null) { + result.input_ = input_; + } else { + result.input_ = inputBuilder_.build(); + } + if (voiceBuilder_ == null) { + result.voice_ = voice_; + } else { + result.voice_ = voiceBuilder_.build(); + } + if (audioConfigBuilder_ == null) { + result.audioConfig_ = audioConfig_; + } else { + result.audioConfig_ = audioConfigBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public 
Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest) { + return mergeFrom((com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest other) { + if (other == com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest.getDefaultInstance()) return this; + if (other.hasInput()) { + mergeInput(other.getInput()); + } + if (other.hasVoice()) { + mergeVoice(other.getVoice()); + } + if (other.hasAudioConfig()) { + mergeAudioConfig(other.getAudioConfig()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.cloud.texttospeech.v1.SynthesisInput input_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.texttospeech.v1.SynthesisInput, com.google.cloud.texttospeech.v1.SynthesisInput.Builder, com.google.cloud.texttospeech.v1.SynthesisInputOrBuilder> inputBuilder_; + /** + *
+     * Required. The Synthesizer requires either plain text or SSML as input.
+     * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + public boolean hasInput() { + return inputBuilder_ != null || input_ != null; + } + /** + *
+     * Required. The Synthesizer requires either plain text or SSML as input.
+     * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + public com.google.cloud.texttospeech.v1.SynthesisInput getInput() { + if (inputBuilder_ == null) { + return input_ == null ? com.google.cloud.texttospeech.v1.SynthesisInput.getDefaultInstance() : input_; + } else { + return inputBuilder_.getMessage(); + } + } + /** + *
+     * Required. The Synthesizer requires either plain text or SSML as input.
+     * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + public Builder setInput(com.google.cloud.texttospeech.v1.SynthesisInput value) { + if (inputBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + input_ = value; + onChanged(); + } else { + inputBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The Synthesizer requires either plain text or SSML as input.
+     * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + public Builder setInput( + com.google.cloud.texttospeech.v1.SynthesisInput.Builder builderForValue) { + if (inputBuilder_ == null) { + input_ = builderForValue.build(); + onChanged(); + } else { + inputBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The Synthesizer requires either plain text or SSML as input.
+     * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + public Builder mergeInput(com.google.cloud.texttospeech.v1.SynthesisInput value) { + if (inputBuilder_ == null) { + if (input_ != null) { + input_ = + com.google.cloud.texttospeech.v1.SynthesisInput.newBuilder(input_).mergeFrom(value).buildPartial(); + } else { + input_ = value; + } + onChanged(); + } else { + inputBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The Synthesizer requires either plain text or SSML as input.
+     * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + public Builder clearInput() { + if (inputBuilder_ == null) { + input_ = null; + onChanged(); + } else { + input_ = null; + inputBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The Synthesizer requires either plain text or SSML as input.
+     * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + public com.google.cloud.texttospeech.v1.SynthesisInput.Builder getInputBuilder() { + + onChanged(); + return getInputFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The Synthesizer requires either plain text or SSML as input.
+     * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + public com.google.cloud.texttospeech.v1.SynthesisInputOrBuilder getInputOrBuilder() { + if (inputBuilder_ != null) { + return inputBuilder_.getMessageOrBuilder(); + } else { + return input_ == null ? + com.google.cloud.texttospeech.v1.SynthesisInput.getDefaultInstance() : input_; + } + } + /** + *
+     * Required. The Synthesizer requires either plain text or SSML as input.
+     * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.texttospeech.v1.SynthesisInput, com.google.cloud.texttospeech.v1.SynthesisInput.Builder, com.google.cloud.texttospeech.v1.SynthesisInputOrBuilder> + getInputFieldBuilder() { + if (inputBuilder_ == null) { + inputBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.texttospeech.v1.SynthesisInput, com.google.cloud.texttospeech.v1.SynthesisInput.Builder, com.google.cloud.texttospeech.v1.SynthesisInputOrBuilder>( + getInput(), + getParentForChildren(), + isClean()); + input_ = null; + } + return inputBuilder_; + } + + private com.google.cloud.texttospeech.v1.VoiceSelectionParams voice_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.texttospeech.v1.VoiceSelectionParams, com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder, com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder> voiceBuilder_; + /** + *
+     * Required. The desired voice of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + public boolean hasVoice() { + return voiceBuilder_ != null || voice_ != null; + } + /** + *
+     * Required. The desired voice of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + public com.google.cloud.texttospeech.v1.VoiceSelectionParams getVoice() { + if (voiceBuilder_ == null) { + return voice_ == null ? com.google.cloud.texttospeech.v1.VoiceSelectionParams.getDefaultInstance() : voice_; + } else { + return voiceBuilder_.getMessage(); + } + } + /** + *
+     * Required. The desired voice of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + public Builder setVoice(com.google.cloud.texttospeech.v1.VoiceSelectionParams value) { + if (voiceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + voice_ = value; + onChanged(); + } else { + voiceBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The desired voice of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + public Builder setVoice( + com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder builderForValue) { + if (voiceBuilder_ == null) { + voice_ = builderForValue.build(); + onChanged(); + } else { + voiceBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The desired voice of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + public Builder mergeVoice(com.google.cloud.texttospeech.v1.VoiceSelectionParams value) { + if (voiceBuilder_ == null) { + if (voice_ != null) { + voice_ = + com.google.cloud.texttospeech.v1.VoiceSelectionParams.newBuilder(voice_).mergeFrom(value).buildPartial(); + } else { + voice_ = value; + } + onChanged(); + } else { + voiceBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The desired voice of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + public Builder clearVoice() { + if (voiceBuilder_ == null) { + voice_ = null; + onChanged(); + } else { + voice_ = null; + voiceBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The desired voice of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + public com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder getVoiceBuilder() { + + onChanged(); + return getVoiceFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The desired voice of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + public com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder getVoiceOrBuilder() { + if (voiceBuilder_ != null) { + return voiceBuilder_.getMessageOrBuilder(); + } else { + return voice_ == null ? + com.google.cloud.texttospeech.v1.VoiceSelectionParams.getDefaultInstance() : voice_; + } + } + /** + *
+     * Required. The desired voice of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.texttospeech.v1.VoiceSelectionParams, com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder, com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder> + getVoiceFieldBuilder() { + if (voiceBuilder_ == null) { + voiceBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.texttospeech.v1.VoiceSelectionParams, com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder, com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder>( + getVoice(), + getParentForChildren(), + isClean()); + voice_ = null; + } + return voiceBuilder_; + } + + private com.google.cloud.texttospeech.v1.AudioConfig audioConfig_ = null; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.texttospeech.v1.AudioConfig, com.google.cloud.texttospeech.v1.AudioConfig.Builder, com.google.cloud.texttospeech.v1.AudioConfigOrBuilder> audioConfigBuilder_; + /** + *
+     * Required. The configuration of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + public boolean hasAudioConfig() { + return audioConfigBuilder_ != null || audioConfig_ != null; + } + /** + *
+     * Required. The configuration of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + public com.google.cloud.texttospeech.v1.AudioConfig getAudioConfig() { + if (audioConfigBuilder_ == null) { + return audioConfig_ == null ? com.google.cloud.texttospeech.v1.AudioConfig.getDefaultInstance() : audioConfig_; + } else { + return audioConfigBuilder_.getMessage(); + } + } + /** + *
+     * Required. The configuration of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + public Builder setAudioConfig(com.google.cloud.texttospeech.v1.AudioConfig value) { + if (audioConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + audioConfig_ = value; + onChanged(); + } else { + audioConfigBuilder_.setMessage(value); + } + + return this; + } + /** + *
+     * Required. The configuration of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + public Builder setAudioConfig( + com.google.cloud.texttospeech.v1.AudioConfig.Builder builderForValue) { + if (audioConfigBuilder_ == null) { + audioConfig_ = builderForValue.build(); + onChanged(); + } else { + audioConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
+     * Required. The configuration of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + public Builder mergeAudioConfig(com.google.cloud.texttospeech.v1.AudioConfig value) { + if (audioConfigBuilder_ == null) { + if (audioConfig_ != null) { + audioConfig_ = + com.google.cloud.texttospeech.v1.AudioConfig.newBuilder(audioConfig_).mergeFrom(value).buildPartial(); + } else { + audioConfig_ = value; + } + onChanged(); + } else { + audioConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
+     * Required. The configuration of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + public Builder clearAudioConfig() { + if (audioConfigBuilder_ == null) { + audioConfig_ = null; + onChanged(); + } else { + audioConfig_ = null; + audioConfigBuilder_ = null; + } + + return this; + } + /** + *
+     * Required. The configuration of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + public com.google.cloud.texttospeech.v1.AudioConfig.Builder getAudioConfigBuilder() { + + onChanged(); + return getAudioConfigFieldBuilder().getBuilder(); + } + /** + *
+     * Required. The configuration of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + public com.google.cloud.texttospeech.v1.AudioConfigOrBuilder getAudioConfigOrBuilder() { + if (audioConfigBuilder_ != null) { + return audioConfigBuilder_.getMessageOrBuilder(); + } else { + return audioConfig_ == null ? + com.google.cloud.texttospeech.v1.AudioConfig.getDefaultInstance() : audioConfig_; + } + } + /** + *
+     * Required. The configuration of the synthesized audio.
+     * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.texttospeech.v1.AudioConfig, com.google.cloud.texttospeech.v1.AudioConfig.Builder, com.google.cloud.texttospeech.v1.AudioConfigOrBuilder> + getAudioConfigFieldBuilder() { + if (audioConfigBuilder_ == null) { + audioConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.texttospeech.v1.AudioConfig, com.google.cloud.texttospeech.v1.AudioConfig.Builder, com.google.cloud.texttospeech.v1.AudioConfigOrBuilder>( + getAudioConfig(), + getParentForChildren(), + isClean()); + audioConfig_ = null; + } + return audioConfigBuilder_; + } + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.SynthesizeSpeechRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.SynthesizeSpeechRequest) + private static final com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest(); + } + + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + public SynthesizeSpeechRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SynthesizeSpeechRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechRequestOrBuilder.java new file mode 100644 index 000000000000..f1cf70304f7e --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechRequestOrBuilder.java @@ -0,0 +1,84 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +public interface SynthesizeSpeechRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.texttospeech.v1.SynthesizeSpeechRequest) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * Required. The Synthesizer requires either plain text or SSML as input.
+   * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + boolean hasInput(); + /** + *
+   * Required. The Synthesizer requires either plain text or SSML as input.
+   * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + com.google.cloud.texttospeech.v1.SynthesisInput getInput(); + /** + *
+   * Required. The Synthesizer requires either plain text or SSML as input.
+   * 
+ * + * .google.cloud.texttospeech.v1.SynthesisInput input = 1; + */ + com.google.cloud.texttospeech.v1.SynthesisInputOrBuilder getInputOrBuilder(); + + /** + *
+   * Required. The desired voice of the synthesized audio.
+   * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + boolean hasVoice(); + /** + *
+   * Required. The desired voice of the synthesized audio.
+   * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + com.google.cloud.texttospeech.v1.VoiceSelectionParams getVoice(); + /** + *
+   * Required. The desired voice of the synthesized audio.
+   * 
+ * + * .google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2; + */ + com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder getVoiceOrBuilder(); + + /** + *
+   * Required. The configuration of the synthesized audio.
+   * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + boolean hasAudioConfig(); + /** + *
+   * Required. The configuration of the synthesized audio.
+   * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + com.google.cloud.texttospeech.v1.AudioConfig getAudioConfig(); + /** + *
+   * Required. The configuration of the synthesized audio.
+   * 
+ * + * .google.cloud.texttospeech.v1.AudioConfig audio_config = 3; + */ + com.google.cloud.texttospeech.v1.AudioConfigOrBuilder getAudioConfigOrBuilder(); +} diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechResponse.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechResponse.java new file mode 100644 index 000000000000..19ea28eb872b --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechResponse.java @@ -0,0 +1,486 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +/** + *
+ * The message returned to the client by the `SynthesizeSpeech` method.
+ * 
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.SynthesizeSpeechResponse} + */ +public final class SynthesizeSpeechResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.SynthesizeSpeechResponse) + SynthesizeSpeechResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use SynthesizeSpeechResponse.newBuilder() to construct. + private SynthesizeSpeechResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SynthesizeSpeechResponse() { + audioContent_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SynthesizeSpeechResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + + audioContent_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse.class, com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse.Builder.class); + } + + public static final int AUDIO_CONTENT_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString audioContent_; + /** + *
+   * The audio data bytes encoded as specified in the request, including the
+   * header (for LINEAR16 audio, we include the WAV header). Note: as
+   * with all bytes fields, protocol buffers use a pure binary representation,
+   * whereas JSON representations use base64.
+   * 
+ * + * bytes audio_content = 1; + */ + public com.google.protobuf.ByteString getAudioContent() { + return audioContent_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!audioContent_.isEmpty()) { + output.writeBytes(1, audioContent_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!audioContent_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, audioContent_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse)) { + return super.equals(obj); + } + com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse other = (com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse) obj; + + boolean result = true; + result = result && getAudioContent() + .equals(other.getAudioContent()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + AUDIO_CONTENT_FIELD_NUMBER; + hash = (53 * hash) + getAudioContent().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * The message returned to the client by the `SynthesizeSpeech` method.
+   * 
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.SynthesizeSpeechResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.texttospeech.v1.SynthesizeSpeechResponse) + com.google.cloud.texttospeech.v1.SynthesizeSpeechResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse.class, com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse.Builder.class); + } + + // Construct using com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + audioContent_ = com.google.protobuf.ByteString.EMPTY; + + return this; + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_descriptor; + } + + public com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse getDefaultInstanceForType() { + return com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse.getDefaultInstance(); + } + + public com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse build() { + com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse buildPartial() { + com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse result = new com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse(this); + result.audioContent_ = audioContent_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse) { + return mergeFrom((com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse other) { + if (other == com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse.getDefaultInstance()) return this; + if (other.getAudioContent() != com.google.protobuf.ByteString.EMPTY) { + setAudioContent(other.getAudioContent()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.protobuf.ByteString audioContent_ = com.google.protobuf.ByteString.EMPTY; + /** + *
+     * The audio data bytes encoded as specified in the request, including the
+     * header (for LINEAR16 audio, we include the WAV header). Note: as
+     * with all bytes fields, protocol buffers use a pure binary representation,
+     * whereas JSON representations use base64.
+     * 
+ * + * bytes audio_content = 1; + */ + public com.google.protobuf.ByteString getAudioContent() { + return audioContent_; + } + /** + *
+     * The audio data bytes encoded as specified in the request, including the
+     * header (for LINEAR16 audio, we include the WAV header). Note: as
+     * with all bytes fields, protocol buffers use a pure binary representation,
+     * whereas JSON representations use base64.
+     * 
+ * + * bytes audio_content = 1; + */ + public Builder setAudioContent(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + + audioContent_ = value; + onChanged(); + return this; + } + /** + *
+     * The audio data bytes encoded as specified in the request, including the
+     * header (for LINEAR16 audio, we include the WAV header). Note: as
+     * with all bytes fields, protocol buffers use a pure binary representation,
+     * whereas JSON representations use base64.
+     * 
+ * + * bytes audio_content = 1; + */ + public Builder clearAudioContent() { + + audioContent_ = getDefaultInstance().getAudioContent(); + onChanged(); + return this; + } + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.SynthesizeSpeechResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.SynthesizeSpeechResponse) + private static final com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse(); + } + + public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + public SynthesizeSpeechResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SynthesizeSpeechResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechResponseOrBuilder.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechResponseOrBuilder.java new file mode 100644 index 000000000000..0f6cd18e0efe --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechResponseOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +public interface SynthesizeSpeechResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.texttospeech.v1.SynthesizeSpeechResponse) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * The audio data bytes encoded as specified in the request, including the
+   * header (for LINEAR16 audio, we include the WAV header). Note: as
+   * with all bytes fields, protocol buffers use a pure binary representation,
+   * whereas JSON representations use base64.
+   * 
+ * + * bytes audio_content = 1; + */ + com.google.protobuf.ByteString getAudioContent(); +} diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechProto.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechProto.java new file mode 100644 index 000000000000..b06155ef6cd8 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechProto.java @@ -0,0 +1,180 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +public final class TextToSpeechProto { + private TextToSpeechProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_texttospeech_v1_Voice_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_texttospeech_v1_Voice_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_texttospeech_v1_SynthesisInput_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_texttospeech_v1_SynthesisInput_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_texttospeech_v1_AudioConfig_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_texttospeech_v1_AudioConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + 
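+  // --- Editorial aside (illustrative sketch; not part of the generated source) ---
+  // The descriptor data below wires up the messages defined in this package
+  // (SynthesizeSpeechRequest/Response, Voice, AudioConfig, ...). A minimal,
+  // hypothetical client call through the generated TextToSpeechGrpc stub might
+  // look like the sketch that follows; channel security and authentication setup
+  // (e.g. application default credentials) are assumed and omitted here.
+  //
+  //   io.grpc.ManagedChannel channel = io.grpc.ManagedChannelBuilder
+  //       .forTarget("texttospeech.googleapis.com").build();
+  //   TextToSpeechGrpc.TextToSpeechBlockingStub stub =
+  //       TextToSpeechGrpc.newBlockingStub(channel);
+  //
+  //   SynthesizeSpeechRequest request = SynthesizeSpeechRequest.newBuilder()
+  //       .setInput(SynthesisInput.newBuilder().setText("Hello, world!"))
+  //       .setVoice(VoiceSelectionParams.newBuilder()
+  //           .setLanguageCode("en-US")
+  //           .setSsmlGender(SsmlVoiceGender.FEMALE))
+  //       .setAudioConfig(AudioConfig.newBuilder().setAudioEncoding(AudioEncoding.MP3))
+  //       .build();
+  //
+  //   SynthesizeSpeechResponse response = stub.synthesizeSpeech(request);
+  //   // audio_content holds the encoded bytes (MP3 here); write them straight to a file.
+  //   java.nio.file.Files.write(java.nio.file.Paths.get("output.mp3"),
+  //       response.getAudioContent().toByteArray());
+  // ------------------------------------------------------------------------------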
private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n,google/cloud/texttospeech/v1/cloud_tts" + + ".proto\022\034google.cloud.texttospeech.v1\032\034go" + + "ogle/api/annotations.proto\"*\n\021ListVoices" + + "Request\022\025\n\rlanguage_code\030\001 \001(\t\"I\n\022ListVo" + + "icesResponse\0223\n\006voices\030\001 \003(\0132#.google.cl" + + "oud.texttospeech.v1.Voice\"\224\001\n\005Voice\022\026\n\016l" + + "anguage_codes\030\001 \003(\t\022\014\n\004name\030\002 \001(\t\022B\n\013ssm" + + "l_gender\030\003 \001(\0162-.google.cloud.texttospee" + + "ch.v1.SsmlVoiceGender\022!\n\031natural_sample_" + + "rate_hertz\030\004 \001(\005\"\332\001\n\027SynthesizeSpeechReq" + + "uest\022;\n\005input\030\001 \001(\0132,.google.cloud.textt" + + "ospeech.v1.SynthesisInput\022A\n\005voice\030\002 \001(\013" + + "22.google.cloud.texttospeech.v1.VoiceSel" + + "ectionParams\022?\n\014audio_config\030\003 \001(\0132).goo" + + "gle.cloud.texttospeech.v1.AudioConfig\"@\n" + + "\016SynthesisInput\022\016\n\004text\030\001 \001(\tH\000\022\016\n\004ssml\030" + + "\002 \001(\tH\000B\016\n\014input_source\"\177\n\024VoiceSelectio" + + "nParams\022\025\n\rlanguage_code\030\001 \001(\t\022\014\n\004name\030\002" + + " \001(\t\022B\n\013ssml_gender\030\003 \001(\0162-.google.cloud" + + ".texttospeech.v1.SsmlVoiceGender\"\253\001\n\013Aud" + + "ioConfig\022C\n\016audio_encoding\030\001 \001(\0162+.googl" + + "e.cloud.texttospeech.v1.AudioEncoding\022\025\n" + + "\rspeaking_rate\030\002 \001(\001\022\r\n\005pitch\030\003 \001(\001\022\026\n\016v" + + "olume_gain_db\030\004 \001(\001\022\031\n\021sample_rate_hertz" + + "\030\005 \001(\005\"1\n\030SynthesizeSpeechResponse\022\025\n\rau" + + "dio_content\030\001 \001(\014*W\n\017SsmlVoiceGender\022!\n\035" + + "SSML_VOICE_GENDER_UNSPECIFIED\020\000\022\010\n\004MALE\020" + + "\001\022\n\n\006FEMALE\020\002\022\013\n\007NEUTRAL\020\003*T\n\rAudioEncod" + + "ing\022\036\n\032AUDIO_ENCODING_UNSPECIFIED\020\000\022\014\n\010L" + + "INEAR16\020\001\022\007\n\003MP3\020\002\022\014\n\010OGG_OPUS\020\0032\270\002\n\014Tex" + + "tToSpeech\022\203\001\n\nListVoices\022/.google.cloud." + + "texttospeech.v1.ListVoicesRequest\0320.goog" + + "le.cloud.texttospeech.v1.ListVoicesRespo" + + "nse\"\022\202\323\344\223\002\014\022\n/v1/voices\022\241\001\n\020SynthesizeSp" + + "eech\0225.google.cloud.texttospeech.v1.Synt" + + "hesizeSpeechRequest\0326.google.cloud.textt" + + "ospeech.v1.SynthesizeSpeechResponse\"\036\202\323\344" + + "\223\002\030\"\023/v1/text:synthesize:\001*B\302\001\n com.goog" + + "le.cloud.texttospeech.v1B\021TextToSpeechPr" + + "otoP\001ZHgoogle.golang.org/genproto/google" + + "apis/cloud/texttospeech/v1;texttospeech\370" + + "\001\001\252\002\034Google.Cloud.TextToSpeech.V1\312\002\034Goog" + + "le\\Cloud\\TextToSpeech\\V1b\006proto3" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + }, assigner); + internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_descriptor, + new java.lang.String[] { "LanguageCode", }); + internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_descriptor, + new java.lang.String[] { "Voices", }); + internal_static_google_cloud_texttospeech_v1_Voice_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_texttospeech_v1_Voice_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_texttospeech_v1_Voice_descriptor, + new java.lang.String[] { "LanguageCodes", "Name", "SsmlGender", "NaturalSampleRateHertz", }); + internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_descriptor, + new java.lang.String[] { "Input", "Voice", "AudioConfig", }); + internal_static_google_cloud_texttospeech_v1_SynthesisInput_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_texttospeech_v1_SynthesisInput_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_texttospeech_v1_SynthesisInput_descriptor, + new java.lang.String[] { "Text", "Ssml", "InputSource", }); + internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_descriptor, + new java.lang.String[] { "LanguageCode", "Name", "SsmlGender", }); + internal_static_google_cloud_texttospeech_v1_AudioConfig_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_texttospeech_v1_AudioConfig_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_texttospeech_v1_AudioConfig_descriptor, + new java.lang.String[] { "AudioEncoding", "SpeakingRate", "Pitch", "VolumeGainDb", "SampleRateHertz", }); + internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_descriptor, + new java.lang.String[] { "AudioContent", }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.AnnotationsProto.http); + com.google.protobuf.Descriptors.FileDescriptor + .internalUpdateFileDescriptor(descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/Voice.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/Voice.java new file mode 100644 index 000000000000..63b486b6e0d4 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/Voice.java @@ -0,0 +1,990 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +/** + *
+ * Description of a voice supported by the TTS service.
+ * 
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.Voice} + */ +public final class Voice extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.Voice) + VoiceOrBuilder { +private static final long serialVersionUID = 0L; + // Use Voice.newBuilder() to construct. + private Voice(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private Voice() { + languageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY; + name_ = ""; + ssmlGender_ = 0; + naturalSampleRateHertz_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Voice( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + languageCodes_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + languageCodes_.add(s); + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 24: { + int rawValue = input.readEnum(); + + ssmlGender_ = rawValue; + break; + } + case 32: { + + naturalSampleRateHertz_ = input.readInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + languageCodes_ = languageCodes_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_Voice_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_Voice_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.Voice.class, com.google.cloud.texttospeech.v1.Voice.Builder.class); + } + + private int bitField0_; + public static final int LANGUAGE_CODES_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList languageCodes_; + /** + *
+   * The languages that this voice supports, expressed as
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+   * "en-US", "es-419", "cmn-tw").
+   * 
+ * + * repeated string language_codes = 1; + */ + public com.google.protobuf.ProtocolStringList + getLanguageCodesList() { + return languageCodes_; + } + /** + *
+   * The languages that this voice supports, expressed as
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+   * "en-US", "es-419", "cmn-tw").
+   * 
+ * + * repeated string language_codes = 1; + */ + public int getLanguageCodesCount() { + return languageCodes_.size(); + } + /** + *
+   * The languages that this voice supports, expressed as
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+   * "en-US", "es-419", "cmn-tw").
+   * 
+ * + * repeated string language_codes = 1; + */ + public java.lang.String getLanguageCodes(int index) { + return languageCodes_.get(index); + } + /** + *
+   * The languages that this voice supports, expressed as
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+   * "en-US", "es-419", "cmn-tw").
+   * 
+ * + * repeated string language_codes = 1; + */ + public com.google.protobuf.ByteString + getLanguageCodesBytes(int index) { + return languageCodes_.getByteString(index); + } + + public static final int NAME_FIELD_NUMBER = 2; + private volatile java.lang.Object name_; + /** + *
+   * The name of this voice.  Each distinct voice has a unique name.
+   * 
+ * + * string name = 2; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + *
+   * The name of this voice.  Each distinct voice has a unique name.
+   * 
+ * + * string name = 2; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SSML_GENDER_FIELD_NUMBER = 3; + private int ssmlGender_; + /** + *
+   * The gender of this voice.
+   * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public int getSsmlGenderValue() { + return ssmlGender_; + } + /** + *
+   * The gender of this voice.
+   * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public com.google.cloud.texttospeech.v1.SsmlVoiceGender getSsmlGender() { + com.google.cloud.texttospeech.v1.SsmlVoiceGender result = com.google.cloud.texttospeech.v1.SsmlVoiceGender.valueOf(ssmlGender_); + return result == null ? com.google.cloud.texttospeech.v1.SsmlVoiceGender.UNRECOGNIZED : result; + } + + public static final int NATURAL_SAMPLE_RATE_HERTZ_FIELD_NUMBER = 4; + private int naturalSampleRateHertz_; + /** + *
+   * The natural sample rate (in hertz) for this voice.
+   * 
+ * + * int32 natural_sample_rate_hertz = 4; + */ + public int getNaturalSampleRateHertz() { + return naturalSampleRateHertz_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < languageCodes_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageCodes_.getRaw(i)); + } + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_); + } + if (ssmlGender_ != com.google.cloud.texttospeech.v1.SsmlVoiceGender.SSML_VOICE_GENDER_UNSPECIFIED.getNumber()) { + output.writeEnum(3, ssmlGender_); + } + if (naturalSampleRateHertz_ != 0) { + output.writeInt32(4, naturalSampleRateHertz_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < languageCodes_.size(); i++) { + dataSize += computeStringSizeNoTag(languageCodes_.getRaw(i)); + } + size += dataSize; + size += 1 * getLanguageCodesList().size(); + } + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_); + } + if (ssmlGender_ != com.google.cloud.texttospeech.v1.SsmlVoiceGender.SSML_VOICE_GENDER_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(3, ssmlGender_); + } + if (naturalSampleRateHertz_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(4, naturalSampleRateHertz_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.texttospeech.v1.Voice)) { + return super.equals(obj); + } + com.google.cloud.texttospeech.v1.Voice other = (com.google.cloud.texttospeech.v1.Voice) obj; + + boolean result = true; + result = result && getLanguageCodesList() + .equals(other.getLanguageCodesList()); + result = result && getName() + .equals(other.getName()); + result = result && ssmlGender_ == other.ssmlGender_; + result = result && (getNaturalSampleRateHertz() + == other.getNaturalSampleRateHertz()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getLanguageCodesCount() > 0) { + hash = (37 * hash) + LANGUAGE_CODES_FIELD_NUMBER; + hash = (53 * hash) + getLanguageCodesList().hashCode(); + } + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + SSML_GENDER_FIELD_NUMBER; + hash = (53 * hash) + ssmlGender_; + hash = (37 * hash) + NATURAL_SAMPLE_RATE_HERTZ_FIELD_NUMBER; + hash = (53 * hash) + getNaturalSampleRateHertz(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.texttospeech.v1.Voice parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
com.google.cloud.texttospeech.v1.Voice parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.Voice parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.Voice parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.Voice parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.Voice parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.Voice parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.Voice parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.Voice parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.Voice parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.Voice parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.Voice parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.texttospeech.v1.Voice prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Description of a voice supported by the TTS service.
+   * 
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.Voice} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.texttospeech.v1.Voice) + com.google.cloud.texttospeech.v1.VoiceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_Voice_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_Voice_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.Voice.class, com.google.cloud.texttospeech.v1.Voice.Builder.class); + } + + // Construct using com.google.cloud.texttospeech.v1.Voice.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + languageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + name_ = ""; + + ssmlGender_ = 0; + + naturalSampleRateHertz_ = 0; + + return this; + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_Voice_descriptor; + } + + public com.google.cloud.texttospeech.v1.Voice getDefaultInstanceForType() { + return com.google.cloud.texttospeech.v1.Voice.getDefaultInstance(); + } + + public com.google.cloud.texttospeech.v1.Voice build() { + com.google.cloud.texttospeech.v1.Voice result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.google.cloud.texttospeech.v1.Voice buildPartial() { + com.google.cloud.texttospeech.v1.Voice result = new com.google.cloud.texttospeech.v1.Voice(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + languageCodes_ = languageCodes_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.languageCodes_ = languageCodes_; + result.name_ = name_; + result.ssmlGender_ = ssmlGender_; + result.naturalSampleRateHertz_ = naturalSampleRateHertz_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.texttospeech.v1.Voice) { + return mergeFrom((com.google.cloud.texttospeech.v1.Voice)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.texttospeech.v1.Voice other) { + if (other == com.google.cloud.texttospeech.v1.Voice.getDefaultInstance()) return this; + if (!other.languageCodes_.isEmpty()) { + if (languageCodes_.isEmpty()) { + languageCodes_ = other.languageCodes_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureLanguageCodesIsMutable(); + languageCodes_.addAll(other.languageCodes_); + } + onChanged(); + } + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.ssmlGender_ != 0) { + setSsmlGenderValue(other.getSsmlGenderValue()); + } + if (other.getNaturalSampleRateHertz() != 0) { + setNaturalSampleRateHertz(other.getNaturalSampleRateHertz()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.texttospeech.v1.Voice parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.texttospeech.v1.Voice) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private com.google.protobuf.LazyStringList languageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureLanguageCodesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + languageCodes_ = new com.google.protobuf.LazyStringArrayList(languageCodes_); + bitField0_ |= 0x00000001; + } + } + /** + *
+     * The languages that this voice supports, expressed as
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+     * "en-US", "es-419", "cmn-tw").
+     * 
+ * + * repeated string language_codes = 1; + */ + public com.google.protobuf.ProtocolStringList + getLanguageCodesList() { + return languageCodes_.getUnmodifiableView(); + } + /** + *
+     * The languages that this voice supports, expressed as
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+     * "en-US", "es-419", "cmn-tw").
+     * 
+ * + * repeated string language_codes = 1; + */ + public int getLanguageCodesCount() { + return languageCodes_.size(); + } + /** + *
+     * The languages that this voice supports, expressed as
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+     * "en-US", "es-419", "cmn-tw").
+     * 
+ * + * repeated string language_codes = 1; + */ + public java.lang.String getLanguageCodes(int index) { + return languageCodes_.get(index); + } + /** + *
+     * The languages that this voice supports, expressed as
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+     * "en-US", "es-419", "cmn-tw").
+     * 
+ * + * repeated string language_codes = 1; + */ + public com.google.protobuf.ByteString + getLanguageCodesBytes(int index) { + return languageCodes_.getByteString(index); + } + /** + *
+     * The languages that this voice supports, expressed as
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+     * "en-US", "es-419", "cmn-tw").
+     * 
+ * + * repeated string language_codes = 1; + */ + public Builder setLanguageCodes( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureLanguageCodesIsMutable(); + languageCodes_.set(index, value); + onChanged(); + return this; + } + /** + *
+     * The languages that this voice supports, expressed as
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+     * "en-US", "es-419", "cmn-tw").
+     * 
+ * + * repeated string language_codes = 1; + */ + public Builder addLanguageCodes( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureLanguageCodesIsMutable(); + languageCodes_.add(value); + onChanged(); + return this; + } + /** + *
+     * The languages that this voice supports, expressed as
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+     * "en-US", "es-419", "cmn-tw").
+     * 
+ * + * repeated string language_codes = 1; + */ + public Builder addAllLanguageCodes( + java.lang.Iterable values) { + ensureLanguageCodesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, languageCodes_); + onChanged(); + return this; + } + /** + *
+     * The languages that this voice supports, expressed as
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+     * "en-US", "es-419", "cmn-tw").
+     * 
+ * + * repeated string language_codes = 1; + */ + public Builder clearLanguageCodes() { + languageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + *
+     * The languages that this voice supports, expressed as
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+     * "en-US", "es-419", "cmn-tw").
+     * 
+ * + * repeated string language_codes = 1; + */ + public Builder addLanguageCodesBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureLanguageCodesIsMutable(); + languageCodes_.add(value); + onChanged(); + return this; + } + + private java.lang.Object name_ = ""; + /** + *
+     * The name of this voice.  Each distinct voice has a unique name.
+     * 
+ * + * string name = 2; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * The name of this voice.  Each distinct voice has a unique name.
+     * 
+ * + * string name = 2; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * The name of this voice.  Each distinct voice has a unique name.
+     * 
+ * + * string name = 2; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + *
+     * The name of this voice.  Each distinct voice has a unique name.
+     * 
+ * + * string name = 2; + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + *
+     * The name of this voice.  Each distinct voice has a unique name.
+     * 
+ * + * string name = 2; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private int ssmlGender_ = 0; + /** + *
+     * The gender of this voice.
+     * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public int getSsmlGenderValue() { + return ssmlGender_; + } + /** + *
+     * The gender of this voice.
+     * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public Builder setSsmlGenderValue(int value) { + ssmlGender_ = value; + onChanged(); + return this; + } + /** + *
+     * The gender of this voice.
+     * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public com.google.cloud.texttospeech.v1.SsmlVoiceGender getSsmlGender() { + com.google.cloud.texttospeech.v1.SsmlVoiceGender result = com.google.cloud.texttospeech.v1.SsmlVoiceGender.valueOf(ssmlGender_); + return result == null ? com.google.cloud.texttospeech.v1.SsmlVoiceGender.UNRECOGNIZED : result; + } + /** + *
+     * The gender of this voice.
+     * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public Builder setSsmlGender(com.google.cloud.texttospeech.v1.SsmlVoiceGender value) { + if (value == null) { + throw new NullPointerException(); + } + + ssmlGender_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
+     * The gender of this voice.
+     * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public Builder clearSsmlGender() { + + ssmlGender_ = 0; + onChanged(); + return this; + } + + private int naturalSampleRateHertz_ ; + /** + *
+     * The natural sample rate (in hertz) for this voice.
+     * 
+ * + * int32 natural_sample_rate_hertz = 4; + */ + public int getNaturalSampleRateHertz() { + return naturalSampleRateHertz_; + } + /** + *
+     * The natural sample rate (in hertz) for this voice.
+     * 
+ * + * int32 natural_sample_rate_hertz = 4; + */ + public Builder setNaturalSampleRateHertz(int value) { + + naturalSampleRateHertz_ = value; + onChanged(); + return this; + } + /** + *
+     * The natural sample rate (in hertz) for this voice.
+     * 
+ * + * int32 natural_sample_rate_hertz = 4; + */ + public Builder clearNaturalSampleRateHertz() { + + naturalSampleRateHertz_ = 0; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.Voice) + } + + // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.Voice) + private static final com.google.cloud.texttospeech.v1.Voice DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.Voice(); + } + + public static com.google.cloud.texttospeech.v1.Voice getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + public Voice parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Voice(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public com.google.cloud.texttospeech.v1.Voice getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceOrBuilder.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceOrBuilder.java new file mode 100644 index 000000000000..0275938f644a --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceOrBuilder.java @@ -0,0 +1,96 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +public interface VoiceOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.texttospeech.v1.Voice) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * The languages that this voice supports, expressed as
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+   * "en-US", "es-419", "cmn-tw").
+   * 
+ * + * repeated string language_codes = 1; + */ + java.util.List + getLanguageCodesList(); + /** + *
+   * The languages that this voice supports, expressed as
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+   * "en-US", "es-419", "cmn-tw").
+   * 
+ * + * repeated string language_codes = 1; + */ + int getLanguageCodesCount(); + /** + *
+   * The languages that this voice supports, expressed as
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+   * "en-US", "es-419", "cmn-tw").
+   * 
+ * + * repeated string language_codes = 1; + */ + java.lang.String getLanguageCodes(int index); + /** + *
+   * The languages that this voice supports, expressed as
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+   * "en-US", "es-419", "cmn-tw").
+   * 
+ * + * repeated string language_codes = 1; + */ + com.google.protobuf.ByteString + getLanguageCodesBytes(int index); + + /** + *
+   * The name of this voice.  Each distinct voice has a unique name.
+   * 
+ * + * string name = 2; + */ + java.lang.String getName(); + /** + *
+   * The name of this voice.  Each distinct voice has a unique name.
+   * 
+ * + * string name = 2; + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + *
+   * The gender of this voice.
+   * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + int getSsmlGenderValue(); + /** + *
+   * The gender of this voice.
+   * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + com.google.cloud.texttospeech.v1.SsmlVoiceGender getSsmlGender(); + + /** + *
+   * The natural sample rate (in hertz) for this voice.
+   * 
+ * + * int32 natural_sample_rate_hertz = 4; + */ + int getNaturalSampleRateHertz(); +} diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParams.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParams.java new file mode 100644 index 000000000000..27475c7dfbfc --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParams.java @@ -0,0 +1,923 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +/** + *
+ * Description of which voice to use for a synthesis request.
+ * 
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.VoiceSelectionParams} + */ +public final class VoiceSelectionParams extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.VoiceSelectionParams) + VoiceSelectionParamsOrBuilder { +private static final long serialVersionUID = 0L; + // Use VoiceSelectionParams.newBuilder() to construct. + private VoiceSelectionParams(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private VoiceSelectionParams() { + languageCode_ = ""; + name_ = ""; + ssmlGender_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private VoiceSelectionParams( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + languageCode_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 24: { + int rawValue = input.readEnum(); + + ssmlGender_ = rawValue; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.VoiceSelectionParams.class, com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder.class); + } + + public static final int LANGUAGE_CODE_FIELD_NUMBER = 1; + private volatile java.lang.Object languageCode_; + /** + *
+   * The language (and optionally also the region) of the voice expressed as a
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+   * "en-US". Required. This should not include a script tag (e.g. use
+   * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+   * from the input provided in the SynthesisInput.  The TTS service
+   * will use this parameter to help choose an appropriate voice.  Note that
+   * the TTS service may choose a voice with a slightly different language code
+   * than the one selected; it may substitute a different region
+   * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+   * available), or even a different language, e.g. using "nb" (Norwegian
+   * Bokmal) instead of "no" (Norwegian).
+   * 
+ * + * string language_code = 1; + */ + public java.lang.String getLanguageCode() { + java.lang.Object ref = languageCode_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + languageCode_ = s; + return s; + } + } + /** + *
+   * The language (and optionally also the region) of the voice expressed as a
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+   * "en-US". Required. This should not include a script tag (e.g. use
+   * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+   * from the input provided in the SynthesisInput.  The TTS service
+   * will use this parameter to help choose an appropriate voice.  Note that
+   * the TTS service may choose a voice with a slightly different language code
+   * than the one selected; it may substitute a different region
+   * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+   * available), or even a different language, e.g. using "nb" (Norwegian
+   * Bokmal) instead of "no" (Norwegian).
+   * 
+ * + * string language_code = 1; + */ + public com.google.protobuf.ByteString + getLanguageCodeBytes() { + java.lang.Object ref = languageCode_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + languageCode_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NAME_FIELD_NUMBER = 2; + private volatile java.lang.Object name_; + /** + *
+   * The name of the voice. Optional; if not set, the service will choose a
+   * voice based on the other parameters such as language_code and gender.
+   * 
+ * + * string name = 2; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + *
+   * The name of the voice. Optional; if not set, the service will choose a
+   * voice based on the other parameters such as language_code and gender.
+   * 
+ * + * string name = 2; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SSML_GENDER_FIELD_NUMBER = 3; + private int ssmlGender_; + /** + *
+   * The preferred gender of the voice. Optional; if not set, the service will
+   * choose a voice based on the other parameters such as language_code and
+   * name. Note that this is only a preference, not a requirement; if a
+   * voice of the appropriate gender is not available, the synthesizer should
+   * substitute a voice with a different gender rather than failing the request.
+   * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public int getSsmlGenderValue() { + return ssmlGender_; + } + /** + *
+   * The preferred gender of the voice. Optional; if not set, the service will
+   * choose a voice based on the other parameters such as language_code and
+   * name. Note that this is only a preference, not a requirement; if a
+   * voice of the appropriate gender is not available, the synthesizer should
+   * substitute a voice with a different gender rather than failing the request.
+   * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public com.google.cloud.texttospeech.v1.SsmlVoiceGender getSsmlGender() { + com.google.cloud.texttospeech.v1.SsmlVoiceGender result = com.google.cloud.texttospeech.v1.SsmlVoiceGender.valueOf(ssmlGender_); + return result == null ? com.google.cloud.texttospeech.v1.SsmlVoiceGender.UNRECOGNIZED : result; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getLanguageCodeBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageCode_); + } + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_); + } + if (ssmlGender_ != com.google.cloud.texttospeech.v1.SsmlVoiceGender.SSML_VOICE_GENDER_UNSPECIFIED.getNumber()) { + output.writeEnum(3, ssmlGender_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getLanguageCodeBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, languageCode_); + } + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_); + } + if (ssmlGender_ != com.google.cloud.texttospeech.v1.SsmlVoiceGender.SSML_VOICE_GENDER_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(3, ssmlGender_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.texttospeech.v1.VoiceSelectionParams)) { + return super.equals(obj); + } + com.google.cloud.texttospeech.v1.VoiceSelectionParams other = (com.google.cloud.texttospeech.v1.VoiceSelectionParams) obj; + + boolean result = true; + result = result && getLanguageCode() + .equals(other.getLanguageCode()); + result = result && getName() + .equals(other.getName()); + result = result && ssmlGender_ == other.ssmlGender_; + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER; + hash = (53 * hash) + getLanguageCode().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + SSML_GENDER_FIELD_NUMBER; + hash = (53 * hash) + ssmlGender_; + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.google.cloud.texttospeech.v1.VoiceSelectionParams prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+   * Description of which voice to use for a synthesis request.
+   * 
+ * + * Protobuf type {@code google.cloud.texttospeech.v1.VoiceSelectionParams} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:google.cloud.texttospeech.v1.VoiceSelectionParams) + com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.VoiceSelectionParams.class, com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder.class); + } + + // Construct using com.google.cloud.texttospeech.v1.VoiceSelectionParams.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + languageCode_ = ""; + + name_ = ""; + + ssmlGender_ = 0; + + return this; + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_descriptor; + } + + public com.google.cloud.texttospeech.v1.VoiceSelectionParams getDefaultInstanceForType() { + return com.google.cloud.texttospeech.v1.VoiceSelectionParams.getDefaultInstance(); + } + + public com.google.cloud.texttospeech.v1.VoiceSelectionParams build() { + com.google.cloud.texttospeech.v1.VoiceSelectionParams result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public com.google.cloud.texttospeech.v1.VoiceSelectionParams buildPartial() { + com.google.cloud.texttospeech.v1.VoiceSelectionParams result = new com.google.cloud.texttospeech.v1.VoiceSelectionParams(this); + result.languageCode_ = languageCode_; + result.name_ = name_; + result.ssmlGender_ = ssmlGender_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
com.google.cloud.texttospeech.v1.VoiceSelectionParams) { + return mergeFrom((com.google.cloud.texttospeech.v1.VoiceSelectionParams)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.texttospeech.v1.VoiceSelectionParams other) { + if (other == com.google.cloud.texttospeech.v1.VoiceSelectionParams.getDefaultInstance()) return this; + if (!other.getLanguageCode().isEmpty()) { + languageCode_ = other.languageCode_; + onChanged(); + } + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.ssmlGender_ != 0) { + setSsmlGenderValue(other.getSsmlGenderValue()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.texttospeech.v1.VoiceSelectionParams parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.texttospeech.v1.VoiceSelectionParams) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object languageCode_ = ""; + /** + *
+     * The language (and optionally also the region) of the voice expressed as a
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+     * "en-US". Required. This should not include a script tag (e.g. use
+     * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+     * from the input provided in the SynthesisInput.  The TTS service
+     * will use this parameter to help choose an appropriate voice.  Note that
+     * the TTS service may choose a voice with a slightly different language code
+     * than the one selected; it may substitute a different region
+     * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+     * available), or even a different language, e.g. using "nb" (Norwegian
+     * Bokmal) instead of "no" (Norwegian).
+     * 
+ * + * string language_code = 1; + */ + public java.lang.String getLanguageCode() { + java.lang.Object ref = languageCode_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + languageCode_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * The language (and optionally also the region) of the voice expressed as a
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+     * "en-US". Required. This should not include a script tag (e.g. use
+     * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+     * from the input provided in the SynthesisInput.  The TTS service
+     * will use this parameter to help choose an appropriate voice.  Note that
+     * the TTS service may choose a voice with a slightly different language code
+     * than the one selected; it may substitute a different region
+     * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+     * available), or even a different language, e.g. using "nb" (Norwegian
+     * Bokmal) instead of "no" (Norwegian).
+     * 
+ * + * string language_code = 1; + */ + public com.google.protobuf.ByteString + getLanguageCodeBytes() { + java.lang.Object ref = languageCode_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + languageCode_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * The language (and optionally also the region) of the voice expressed as a
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+     * "en-US". Required. This should not include a script tag (e.g. use
+     * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+     * from the input provided in the SynthesisInput.  The TTS service
+     * will use this parameter to help choose an appropriate voice.  Note that
+     * the TTS service may choose a voice with a slightly different language code
+     * than the one selected; it may substitute a different region
+     * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+     * available), or even a different language, e.g. using "nb" (Norwegian
+     * Bokmal) instead of "no" (Norwegian).
+     * 
+ * + * string language_code = 1; + */ + public Builder setLanguageCode( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + languageCode_ = value; + onChanged(); + return this; + } + /** + *
+     * The language (and optionally also the region) of the voice expressed as a
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+     * "en-US". Required. This should not include a script tag (e.g. use
+     * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+     * from the input provided in the SynthesisInput.  The TTS service
+     * will use this parameter to help choose an appropriate voice.  Note that
+     * the TTS service may choose a voice with a slightly different language code
+     * than the one selected; it may substitute a different region
+     * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+     * available), or even a different language, e.g. using "nb" (Norwegian
+     * Bokmal) instead of "no" (Norwegian).
+     * 
+ * + * string language_code = 1; + */ + public Builder clearLanguageCode() { + + languageCode_ = getDefaultInstance().getLanguageCode(); + onChanged(); + return this; + } + /** + *
+     * The language (and optionally also the region) of the voice expressed as a
+     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+     * "en-US". Required. This should not include a script tag (e.g. use
+     * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+     * from the input provided in the SynthesisInput.  The TTS service
+     * will use this parameter to help choose an appropriate voice.  Note that
+     * the TTS service may choose a voice with a slightly different language code
+     * than the one selected; it may substitute a different region
+     * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+     * available), or even a different language, e.g. using "nb" (Norwegian
+     * Bokmal) instead of "no" (Norwegian).
+     * 
+ * + * string language_code = 1; + */ + public Builder setLanguageCodeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + languageCode_ = value; + onChanged(); + return this; + } + + private java.lang.Object name_ = ""; + /** + *
+     * The name of the voice. Optional; if not set, the service will choose a
+     * voice based on the other parameters such as language_code and gender.
+     * 
+ * + * string name = 2; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+     * The name of the voice. Optional; if not set, the service will choose a
+     * voice based on the other parameters such as language_code and gender.
+     * 
+ * + * string name = 2; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+     * The name of the voice. Optional; if not set, the service will choose a
+     * voice based on the other parameters such as language_code and gender.
+     * 
+ * + * string name = 2; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + *
+     * The name of the voice. Optional; if not set, the service will choose a
+     * voice based on the other parameters such as language_code and gender.
+     * 
+ * + * string name = 2; + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + *
+     * The name of the voice. Optional; if not set, the service will choose a
+     * voice based on the other parameters such as language_code and gender.
+     * 
+ * + * string name = 2; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private int ssmlGender_ = 0; + /** + *
+     * The preferred gender of the voice. Optional; if not set, the service will
+     * choose a voice based on the other parameters such as language_code and
+     * name. Note that this is only a preference, not a requirement; if a
+     * voice of the appropriate gender is not available, the synthesizer should
+     * substitute a voice with a different gender rather than failing the request.
+     * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public int getSsmlGenderValue() { + return ssmlGender_; + } + /** + *
+     * The preferred gender of the voice. Optional; if not set, the service will
+     * choose a voice based on the other parameters such as language_code and
+     * name. Note that this is only a preference, not a requirement; if a
+     * voice of the appropriate gender is not available, the synthesizer should
+     * substitute a voice with a different gender rather than failing the request.
+     * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public Builder setSsmlGenderValue(int value) { + ssmlGender_ = value; + onChanged(); + return this; + } + /** + *
+     * The preferred gender of the voice. Optional; if not set, the service will
+     * choose a voice based on the other parameters such as language_code and
+     * name. Note that this is only a preference, not a requirement; if a
+     * voice of the appropriate gender is not available, the synthesizer should
+     * substitute a voice with a different gender rather than failing the request.
+     * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public com.google.cloud.texttospeech.v1.SsmlVoiceGender getSsmlGender() { + com.google.cloud.texttospeech.v1.SsmlVoiceGender result = com.google.cloud.texttospeech.v1.SsmlVoiceGender.valueOf(ssmlGender_); + return result == null ? com.google.cloud.texttospeech.v1.SsmlVoiceGender.UNRECOGNIZED : result; + } + /** + *
+     * The preferred gender of the voice. Optional; if not set, the service will
+     * choose a voice based on the other parameters such as language_code and
+     * name. Note that this is only a preference, not a requirement; if a
+     * voice of the appropriate gender is not available, the synthesizer should
+     * substitute a voice with a different gender rather than failing the request.
+     * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public Builder setSsmlGender(com.google.cloud.texttospeech.v1.SsmlVoiceGender value) { + if (value == null) { + throw new NullPointerException(); + } + + ssmlGender_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
+     * The preferred gender of the voice. Optional; if not set, the service will
+     * choose a voice based on the other parameters such as language_code and
+     * name. Note that this is only a preference, not a requirement; if a
+     * voice of the appropriate gender is not available, the synthesizer should
+     * substitute a voice with a different gender rather than failing the request.
+     * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + public Builder clearSsmlGender() { + + ssmlGender_ = 0; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFieldsProto3(unknownFields); + } + + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.VoiceSelectionParams) + } + + // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.VoiceSelectionParams) + private static final com.google.cloud.texttospeech.v1.VoiceSelectionParams DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.VoiceSelectionParams(); + } + + public static com.google.cloud.texttospeech.v1.VoiceSelectionParams getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + public VoiceSelectionParams parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new VoiceSelectionParams(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public com.google.cloud.texttospeech.v1.VoiceSelectionParams getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParamsOrBuilder.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParamsOrBuilder.java new file mode 100644 index 000000000000..9c1b7706ca7e --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParamsOrBuilder.java @@ -0,0 +1,92 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1; + +public interface VoiceSelectionParamsOrBuilder extends + // @@protoc_insertion_point(interface_extends:google.cloud.texttospeech.v1.VoiceSelectionParams) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   * The language (and optionally also the region) of the voice expressed as a
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+   * "en-US". Required. This should not include a script tag (e.g. use
+   * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+   * from the input provided in the SynthesisInput.  The TTS service
+   * will use this parameter to help choose an appropriate voice.  Note that
+   * the TTS service may choose a voice with a slightly different language code
+   * than the one selected; it may substitute a different region
+   * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+   * available), or even a different language, e.g. using "nb" (Norwegian
+   * Bokmal) instead of "no" (Norwegian).
+   * 
+ * + * string language_code = 1; + */ + java.lang.String getLanguageCode(); + /** + *
+   * The language (and optionally also the region) of the voice expressed as a
+   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+   * "en-US". Required. This should not include a script tag (e.g. use
+   * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+   * from the input provided in the SynthesisInput.  The TTS service
+   * will use this parameter to help choose an appropriate voice.  Note that
+   * the TTS service may choose a voice with a slightly different language code
+   * than the one selected; it may substitute a different region
+   * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+   * available), or even a different language, e.g. using "nb" (Norwegian
+   * Bokmal) instead of "no" (Norwegian).
+   * 
+ * + * string language_code = 1; + */ + com.google.protobuf.ByteString + getLanguageCodeBytes(); + + /** + *
+   * The name of the voice. Optional; if not set, the service will choose a
+   * voice based on the other parameters such as language_code and gender.
+   * 
+ * + * string name = 2; + */ + java.lang.String getName(); + /** + *
+   * The name of the voice. Optional; if not set, the service will choose a
+   * voice based on the other parameters such as language_code and gender.
+   * 
+ * + * string name = 2; + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + *
+   * The preferred gender of the voice. Optional; if not set, the service will
+   * choose a voice based on the other parameters such as language_code and
+   * name. Note that this is only a preference, not a requirement; if a
+   * voice of the appropriate gender is not available, the synthesizer should
+   * substitute a voice with a different gender rather than failing the request.
+   * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + int getSsmlGenderValue(); + /** + *
+   * The preferred gender of the voice. Optional; if not set, the service will
+   * choose a voice based on the other parameters such as language_code and
+   * name. Note that this is only a preference, not a requirement; if a
+   * voice of the appropriate gender is not available, the synthesizer should
+   * substitute a voice with a different gender rather than failing the request.
+   * 
+ * + * .google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3; + */ + com.google.cloud.texttospeech.v1.SsmlVoiceGender getSsmlGender(); +} diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/proto/google/cloud/texttospeech/v1/cloud_tts.proto b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/proto/google/cloud/texttospeech/v1/cloud_tts.proto new file mode 100644 index 000000000000..0ccbde389214 --- /dev/null +++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/proto/google/cloud/texttospeech/v1/cloud_tts.proto @@ -0,0 +1,225 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.texttospeech.v1; + +import "google/api/annotations.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.TextToSpeech.V1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/texttospeech/v1;texttospeech"; +option java_multiple_files = true; +option java_outer_classname = "TextToSpeechProto"; +option java_package = "com.google.cloud.texttospeech.v1"; +option php_namespace = "Google\\Cloud\\TextToSpeech\\V1"; + + +// Service that implements Google Cloud Text-to-Speech API. +service TextToSpeech { + // Returns a list of [Voice][google.cloud.texttospeech.v1.Voice] + // supported for synthesis. + rpc ListVoices(ListVoicesRequest) returns (ListVoicesResponse) { + option (google.api.http) = { + get: "/v1/voices" + }; + } + + // Synthesizes speech synchronously: receive results after all text input + // has been processed. + rpc SynthesizeSpeech(SynthesizeSpeechRequest) returns (SynthesizeSpeechResponse) { + option (google.api.http) = { + post: "/v1/text:synthesize" + body: "*" + }; + } +} + +// The top-level message sent by the client for the `ListVoices` method. +message ListVoicesRequest { + // Optional (but recommended) + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If + // specified, the ListVoices call will only return voices that can be used to + // synthesize this language_code. E.g. when specifying "en-NZ", you will get + // supported "en-*" voices; when specifying "no", you will get supported + // "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh" + // will also get supported "cmn-*" voices; specifying "zh-hk" will also get + // supported "yue-*" voices. + string language_code = 1; +} + +// The message returned to the client by the `ListVoices` method. +message ListVoicesResponse { + // The list of voices. + repeated Voice voices = 1; +} + +// Description of a voice supported by the TTS service. +message Voice { + // The languages that this voice supports, expressed as + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g. + // "en-US", "es-419", "cmn-tw"). + repeated string language_codes = 1; + + // The name of this voice. Each distinct voice has a unique name. + string name = 2; + + // The gender of this voice. 
+ SsmlVoiceGender ssml_gender = 3; + + // The natural sample rate (in hertz) for this voice. + int32 natural_sample_rate_hertz = 4; +} + +// The top-level message sent by the client for the `SynthesizeSpeech` method. +message SynthesizeSpeechRequest { + // Required. The Synthesizer requires either plain text or SSML as input. + SynthesisInput input = 1; + + // Required. The desired voice of the synthesized audio. + VoiceSelectionParams voice = 2; + + // Required. The configuration of the synthesized audio. + AudioConfig audio_config = 3; +} + +// Contains text input to be synthesized. Either `text` or `ssml` must be +// supplied. Supplying both or neither returns +// [google.rpc.Code.INVALID_ARGUMENT][]. The input size is limited to 5000 +// characters. +message SynthesisInput { + // The input source, which is either plain text or SSML. + oneof input_source { + // The raw text to be synthesized. + string text = 1; + + // The SSML document to be synthesized. The SSML document must be valid + // and well-formed. Otherwise the RPC will fail and return + // [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see + // [SSML](/speech/text-to-speech/docs/ssml). + string ssml = 2; + } +} + +// Description of which voice to use for a synthesis request. +message VoiceSelectionParams { + // The language (and optionally also the region) of the voice expressed as a + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. + // "en-US". Required. This should not include a script tag (e.g. use + // "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred + // from the input provided in the SynthesisInput. The TTS service + // will use this parameter to help choose an appropriate voice. Note that + // the TTS service may choose a voice with a slightly different language code + // than the one selected; it may substitute a different region + // (e.g. using en-US rather than en-CA if there isn't a Canadian voice + // available), or even a different language, e.g. using "nb" (Norwegian + // Bokmal) instead of "no" (Norwegian)". + string language_code = 1; + + // The name of the voice. Optional; if not set, the service will choose a + // voice based on the other parameters such as language_code and gender. + string name = 2; + + // The preferred gender of the voice. Optional; if not set, the service will + // choose a voice based on the other parameters such as language_code and + // name. Note that this is only a preference, not requirement; if a + // voice of the appropriate gender is not available, the synthesizer should + // substitute a voice with a different gender rather than failing the request. + SsmlVoiceGender ssml_gender = 3; +} + +// Description of audio data to be synthesized. +message AudioConfig { + // Required. The format of the requested audio byte stream. + AudioEncoding audio_encoding = 1; + + // Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal + // native speed supported by the specific voice. 2.0 is twice as fast, and + // 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 speed. Any + // other values < 0.25 or > 4.0 will return an error. + double speaking_rate = 2; + + // Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20 + // semitones from the original pitch. -20 means decrease 20 semitones from the + // original pitch. + double pitch = 3; + + // Optional volume gain (in dB) of the normal native volume supported by the + // specific voice, in the range [-96.0, 16.0]. 
If unset, or set to a value of + // 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB) + // will play at approximately half the amplitude of the normal native signal + // amplitude. A value of +6.0 (dB) will play at approximately twice the + // amplitude of the normal native signal amplitude. Strongly recommend not to + // exceed +10 (dB) as there's usually no effective increase in loudness for + // any value greater than that. + double volume_gain_db = 4; + + // The synthesis sample rate (in hertz) for this audio. Optional. If this is + // different from the voice's natural sample rate, then the synthesizer will + // honor this request by converting to the desired sample rate (which might + // result in worse audio quality), unless the specified sample rate is not + // supported for the encoding chosen, in which case it will fail the request + // and return [google.rpc.Code.INVALID_ARGUMENT][]. + int32 sample_rate_hertz = 5; +} + +// The message returned to the client by the `SynthesizeSpeech` method. +message SynthesizeSpeechResponse { + // The audio data bytes encoded as specified in the request, including the + // header (For LINEAR16 audio, we include the WAV header). Note: as + // with all bytes fields, protobuffers use a pure binary representation, + // whereas JSON representations use base64. + bytes audio_content = 1; +} + +// Gender of the voice as described in +// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice). +enum SsmlVoiceGender { + // An unspecified gender. + // In VoiceSelectionParams, this means that the client doesn't care which + // gender the selected voice will have. In the Voice field of + // ListVoicesResponse, this may mean that the voice doesn't fit any of the + // other categories in this enum, or that the gender of the voice isn't known. + SSML_VOICE_GENDER_UNSPECIFIED = 0; + + // A male voice. + MALE = 1; + + // A female voice. + FEMALE = 2; + + // A gender-neutral voice. + NEUTRAL = 3; +} + +// Configuration to set up audio encoder. The encoding determines the output +// audio format that we'd like. +enum AudioEncoding { + // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][]. + AUDIO_ENCODING_UNSPECIFIED = 0; + + // Uncompressed 16-bit signed little-endian samples (Linear PCM). + // Audio content returned as LINEAR16 also contains a WAV header. + LINEAR16 = 1; + + // MP3 audio. + MP3 = 2; + + // Opus encoded audio wrapped in an ogg container. The result will be a + // file which can be played natively on Android, and in browsers (at least + // Chrome and Firefox). The quality of the encoding is considerably higher + // than MP3 while using approximately the same bitrate. + OGG_OPUS = 3; +} diff --git a/google-cloud-bom/pom.xml b/google-cloud-bom/pom.xml index 764858bc1f04..d5bb627b2b13 100644 --- a/google-cloud-bom/pom.xml +++ b/google-cloud-bom/pom.xml @@ -755,6 +755,16 @@ grpc-google-cloud-texttospeech-v1beta1 0.14.1-SNAPSHOT
+ + com.google.api.grpc + proto-google-cloud-texttospeech-v1 + 0.14.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-texttospeech-v1 + 0.14.1-SNAPSHOT + com.google.cloud google-cloud-trace diff --git a/google-cloud-clients/google-cloud-texttospeech/README.md b/google-cloud-clients/google-cloud-texttospeech/README.md index 97d6a44aed6b..a526be6e51ef 100644 --- a/google-cloud-clients/google-cloud-texttospeech/README.md +++ b/google-cloud-clients/google-cloud-texttospeech/README.md @@ -99,4 +99,4 @@ Apache 2.0 - See [LICENSE] for more information. [cloud-platform]: https://cloud.google.com/ [cloud-texttospeech]: https://cloud.google.com/texttospeech [texttospeech-product-docs]: https://cloud.google.com/texttospeech/docs -[texttospeech-client-lib-docs]: https://googlecloudplatform.github.io/google-cloud-java/google-cloud-clients/apidocs/index.html?com/google/cloud/texttospeech/v1beta1/package-summary.html +[texttospeech-client-lib-docs]: https://googlecloudplatform.github.io/google-cloud-java/google-cloud-clients/apidocs/index.html?com/google/cloud/texttospeech/v1/package-summary.html diff --git a/google-cloud-clients/google-cloud-texttospeech/pom.xml b/google-cloud-clients/google-cloud-texttospeech/pom.xml index 28ff83f107c2..4f6e0931a13e 100644 --- a/google-cloud-clients/google-cloud-texttospeech/pom.xml +++ b/google-cloud-clients/google-cloud-texttospeech/pom.xml @@ -30,6 +30,10 @@ com.google.api.grpc proto-google-cloud-texttospeech-v1beta1 + + com.google.api.grpc + proto-google-cloud-texttospeech-v1 + io.grpc grpc-netty-shaded @@ -68,6 +72,11 @@ grpc-google-cloud-texttospeech-v1beta1 test + + com.google.api.grpc + grpc-google-cloud-texttospeech-v1 + test + com.google.api gax-grpc diff --git a/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechClient.java b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechClient.java new file mode 100644 index 000000000000..8d5c3599cfcf --- /dev/null +++ b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechClient.java @@ -0,0 +1,329 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.texttospeech.v1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.texttospeech.v1.stub.TextToSpeechStub; +import com.google.cloud.texttospeech.v1.stub.TextToSpeechStubSettings; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND SERVICE +/** + * Service Description: Service that implements Google Cloud Text-to-Speech API. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

+ * 
+ * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+ *   String languageCode = "";
+ *   ListVoicesResponse response = textToSpeechClient.listVoices(languageCode);
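+ *
+ *   // A minimal synthesis sketch built from the messages in this change; it assumes a
+ *   // flattened synthesizeSpeech(input, voice, audioConfig) method is generated alongside
+ *   // listVoices, and the "en-US"/MP3 values are illustrative only.
+ *   SynthesisInput input = SynthesisInput.newBuilder().setText("Hello, world").build();
+ *   VoiceSelectionParams voice = VoiceSelectionParams.newBuilder()
+ *       .setLanguageCode("en-US")
+ *       .setSsmlGender(SsmlVoiceGender.FEMALE)
+ *       .build();
+ *   AudioConfig audioConfig = AudioConfig.newBuilder()
+ *       .setAudioEncoding(AudioEncoding.MP3)
+ *       .build();
+ *   SynthesizeSpeechResponse speech =
+ *       textToSpeechClient.synthesizeSpeech(input, voice, audioConfig);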
+ * }
+ * 
+ * 
+ * + *

Note: close() needs to be called on the textToSpeechClient object to clean up resources such + * as threads. In the example above, try-with-resources is used, which automatically calls close(). + * + *

The surface of this class includes several types of Java methods for each of the API's + * methods: + * + *

    + *
  1. A "flattened" method. With this type of method, the fields of the request type have been + * converted into function parameters. It may be the case that not all fields are available as + * parameters, and not every API method will have a flattened method entry point. + *
  2. A "request object" method. This type of method only takes one parameter, a request object, + * which must be constructed before the call. Not every API method will have a request object + * method. + *
  3. A "callable" method. This type of method takes no parameters and returns an immutable API + * callable object, which can be used to initiate calls to the service. + *
+ * + *

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of TextToSpeechSettings to + * create(). For example: + * + *

To customize credentials: + * + *

+ * 
+ * TextToSpeechSettings textToSpeechSettings =
+ *     TextToSpeechSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * TextToSpeechClient textToSpeechClient =
+ *     TextToSpeechClient.create(textToSpeechSettings);
+ * 
+ * 
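Editor's note: one common way to obtain the myCredentials value shown above is to load a service account key with GoogleCredentials and wrap it in FixedCredentialsProvider (both assumed to be on the client's classpath via its auth/gax dependencies). A sketch; the key-file path is illustrative:

// Load service-account credentials from a JSON key file (path is illustrative).
GoogleCredentials myCredentials;
try (FileInputStream keyStream = new FileInputStream("/path/to/service-account.json")) {
  myCredentials = GoogleCredentials.fromStream(keyStream);
}
TextToSpeechSettings textToSpeechSettings =
    TextToSpeechSettings.newBuilder()
        .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
        .build();
TextToSpeechClient textToSpeechClient = TextToSpeechClient.create(textToSpeechSettings);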
+ * + * To customize the endpoint: + * + *
+ * 
+ * TextToSpeechSettings textToSpeechSettings =
+ *     TextToSpeechSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * TextToSpeechClient textToSpeechClient =
+ *     TextToSpeechClient.create(textToSpeechSettings);
+ * 
+ * 
+ */ +@Generated("by gapic-generator") +@BetaApi +public class TextToSpeechClient implements BackgroundResource { + private final TextToSpeechSettings settings; + private final TextToSpeechStub stub; + + /** Constructs an instance of TextToSpeechClient with default settings. */ + public static final TextToSpeechClient create() throws IOException { + return create(TextToSpeechSettings.newBuilder().build()); + } + + /** + * Constructs an instance of TextToSpeechClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final TextToSpeechClient create(TextToSpeechSettings settings) throws IOException { + return new TextToSpeechClient(settings); + } + + /** + * Constructs an instance of TextToSpeechClient, using the given stub for making calls. This is + * for advanced usage - prefer to use TextToSpeechSettings}. + */ + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public static final TextToSpeechClient create(TextToSpeechStub stub) { + return new TextToSpeechClient(stub); + } + + /** + * Constructs an instance of TextToSpeechClient, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected TextToSpeechClient(TextToSpeechSettings settings) throws IOException { + this.settings = settings; + this.stub = ((TextToSpeechStubSettings) settings.getStubSettings()).createStub(); + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + protected TextToSpeechClient(TextToSpeechStub stub) { + this.settings = null; + this.stub = stub; + } + + public final TextToSpeechSettings getSettings() { + return settings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public TextToSpeechStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice] supported for synthesis. + * + *

Sample code: + * + *


+   * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+   *   String languageCode = "";
+   *   ListVoicesResponse response = textToSpeechClient.listVoices(languageCode);
+   * }
+   * 
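Editor's note: the response is mainly useful for discovering which voices exist before synthesizing. A sketch of iterating it, assuming the generated Voice accessors (getName, getSsmlGender, getNaturalSampleRateHertz) from the v1 proto:

ListVoicesResponse response = textToSpeechClient.listVoices("en-GB");
for (Voice voice : response.getVoicesList()) {
  // Print each voice's name, SSML gender and natural sample rate.
  System.out.printf(
      "%s (%s, %d Hz)%n",
      voice.getName(), voice.getSsmlGender(), voice.getNaturalSampleRateHertz());
}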
+ * + * @param languageCode Optional (but recommended) + * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If specified, the + * ListVoices call will only return voices that can be used to synthesize this language_code. + * E.g. when specifying "en-NZ", you will get supported "en-*" voices; when specifying + * "no", you will get supported "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) + * voices; specifying "zh" will also get supported "cmn-*" voices; specifying "zh-hk" will + * also get supported "yue-*" voices. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListVoicesResponse listVoices(String languageCode) { + + ListVoicesRequest request = + ListVoicesRequest.newBuilder().setLanguageCode(languageCode).build(); + return listVoices(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice] supported for synthesis. + * + *

Sample code: + * + *


+   * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+   *   ListVoicesRequest request = ListVoicesRequest.newBuilder().build();
+   *   ListVoicesResponse response = textToSpeechClient.listVoices(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListVoicesResponse listVoices(ListVoicesRequest request) { + return listVoicesCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice] supported for synthesis. + * + *

Sample code: + * + *


+   * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+   *   ListVoicesRequest request = ListVoicesRequest.newBuilder().build();
+   *   ApiFuture<ListVoicesResponse> future = textToSpeechClient.listVoicesCallable().futureCall(request);
+   *   // Do something
+   *   ListVoicesResponse response = future.get();
+   * }
+   * 
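Editor's note: instead of blocking on future.get(), a callback can be attached to the returned ApiFuture. A sketch using ApiFutures.addCallback with an explicit executor (assuming a gax version that provides that overload), with imports omitted:

ApiFuture<ListVoicesResponse> future =
    textToSpeechClient.listVoicesCallable().futureCall(ListVoicesRequest.newBuilder().build());
ApiFutures.addCallback(
    future,
    new ApiFutureCallback<ListVoicesResponse>() {
      @Override
      public void onSuccess(ListVoicesResponse response) {
        System.out.println("Voices returned: " + response.getVoicesCount());
      }

      @Override
      public void onFailure(Throwable t) {
        // The future fails with the same ApiException the blocking methods would throw.
        t.printStackTrace();
      }
    },
    MoreExecutors.directExecutor());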
+ */ + public final UnaryCallable listVoicesCallable() { + return stub.listVoicesCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Synthesizes speech synchronously: receive results after all text input has been processed. + * + *

Sample code: + * + *


+   * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+   *   SynthesisInput input = SynthesisInput.newBuilder().build();
+   *   VoiceSelectionParams voice = VoiceSelectionParams.newBuilder().build();
+   *   AudioConfig audioConfig = AudioConfig.newBuilder().build();
+   *   SynthesizeSpeechResponse response = textToSpeechClient.synthesizeSpeech(input, voice, audioConfig);
+   * }
+   * 
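Editor's note: in practice the three request messages are populated rather than left at their defaults. A sketch that synthesizes a short sentence to an MP3 file, assuming the generated v1 builder setters (setText, setLanguageCode, setSsmlGender, setAudioEncoding) and omitting imports:

try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
  SynthesisInput input =
      SynthesisInput.newBuilder().setText("Hello from Cloud Text-to-Speech").build();
  VoiceSelectionParams voice =
      VoiceSelectionParams.newBuilder()
          .setLanguageCode("en-US")
          .setSsmlGender(SsmlVoiceGender.NEUTRAL)
          .build();
  AudioConfig audioConfig =
      AudioConfig.newBuilder().setAudioEncoding(AudioEncoding.MP3).build();

  SynthesizeSpeechResponse response =
      textToSpeechClient.synthesizeSpeech(input, voice, audioConfig);

  // The synthesized audio arrives as a ByteString; write the raw bytes to disk.
  try (OutputStream out = new FileOutputStream("output.mp3")) {
    out.write(response.getAudioContent().toByteArray());
  }
}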
+ * + * @param input Required. The Synthesizer requires either plain text or SSML as input. + * @param voice Required. The desired voice of the synthesized audio. + * @param audioConfig Required. The configuration of the synthesized audio. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final SynthesizeSpeechResponse synthesizeSpeech( + SynthesisInput input, VoiceSelectionParams voice, AudioConfig audioConfig) { + + SynthesizeSpeechRequest request = + SynthesizeSpeechRequest.newBuilder() + .setInput(input) + .setVoice(voice) + .setAudioConfig(audioConfig) + .build(); + return synthesizeSpeech(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Synthesizes speech synchronously: receive results after all text input has been processed. + * + *

Sample code: + * + *


+   * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+   *   SynthesisInput input = SynthesisInput.newBuilder().build();
+   *   VoiceSelectionParams voice = VoiceSelectionParams.newBuilder().build();
+   *   AudioConfig audioConfig = AudioConfig.newBuilder().build();
+   *   SynthesizeSpeechRequest request = SynthesizeSpeechRequest.newBuilder()
+   *     .setInput(input)
+   *     .setVoice(voice)
+   *     .setAudioConfig(audioConfig)
+   *     .build();
+   *   SynthesizeSpeechResponse response = textToSpeechClient.synthesizeSpeech(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final SynthesizeSpeechResponse synthesizeSpeech(SynthesizeSpeechRequest request) { + return synthesizeSpeechCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Synthesizes speech synchronously: receive results after all text input has been processed. + * + *

Sample code: + * + *


+   * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+   *   SynthesisInput input = SynthesisInput.newBuilder().build();
+   *   VoiceSelectionParams voice = VoiceSelectionParams.newBuilder().build();
+   *   AudioConfig audioConfig = AudioConfig.newBuilder().build();
+   *   SynthesizeSpeechRequest request = SynthesizeSpeechRequest.newBuilder()
+   *     .setInput(input)
+   *     .setVoice(voice)
+   *     .setAudioConfig(audioConfig)
+   *     .build();
+   *   ApiFuture<SynthesizeSpeechResponse> future = textToSpeechClient.synthesizeSpeechCallable().futureCall(request);
+   *   // Do something
+   *   SynthesizeSpeechResponse response = future.get();
+   * }
+   * 
+ */ + public final UnaryCallable + synthesizeSpeechCallable() { + return stub.synthesizeSpeechCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechSettings.java b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechSettings.java new file mode 100644 index 000000000000..3ea6ca2f89d6 --- /dev/null +++ b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechSettings.java @@ -0,0 +1,186 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.texttospeech.v1; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.texttospeech.v1.stub.TextToSpeechStubSettings; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Settings class to configure an instance of {@link TextToSpeechClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (texttospeech.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. For + * example, to set the total timeout of listVoices to 30 seconds: + * + *

+ * 
+ * TextToSpeechSettings.Builder textToSpeechSettingsBuilder =
+ *     TextToSpeechSettings.newBuilder();
+ * textToSpeechSettingsBuilder.listVoicesSettings().setRetrySettings(
+ *     textToSpeechSettingsBuilder.listVoicesSettings().getRetrySettings().toBuilder()
+ *         .setTotalTimeout(Duration.ofSeconds(30))
+ *         .build());
+ * TextToSpeechSettings textToSpeechSettings = textToSpeechSettingsBuilder.build();
+ * 
+ * 
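Editor's note: the Builder also exposes applyToAllUnaryMethods (declared further down in this file) for changing every unary method at once; note that it is declared with throws Exception. A minimal sketch, assuming only the retryable codes need to change:

TextToSpeechSettings.Builder builder = TextToSpeechSettings.newBuilder();
builder.applyToAllUnaryMethods(
    new ApiFunction<UnaryCallSettings.Builder<?, ?>, Void>() {
      @Override
      public Void apply(UnaryCallSettings.Builder<?, ?> methodSettings) {
        // Replace the generated retryable codes: retry only on UNAVAILABLE.
        methodSettings.setRetryableCodes(StatusCode.Code.UNAVAILABLE);
        return null;
      }
    });
TextToSpeechSettings textToSpeechSettings = builder.build();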
+ */ +@Generated("by gapic-generator") +@BetaApi +public class TextToSpeechSettings extends ClientSettings { + /** Returns the object with the settings used for calls to listVoices. */ + public UnaryCallSettings listVoicesSettings() { + return ((TextToSpeechStubSettings) getStubSettings()).listVoicesSettings(); + } + + /** Returns the object with the settings used for calls to synthesizeSpeech. */ + public UnaryCallSettings + synthesizeSpeechSettings() { + return ((TextToSpeechStubSettings) getStubSettings()).synthesizeSpeechSettings(); + } + + public static final TextToSpeechSettings create(TextToSpeechStubSettings stub) + throws IOException { + return new TextToSpeechSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return TextToSpeechStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return TextToSpeechStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return TextToSpeechStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return TextToSpeechStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return TextToSpeechStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return TextToSpeechStubSettings.defaultTransportChannelProvider(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return TextToSpeechStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected TextToSpeechSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for TextToSpeechSettings. 
*/ + public static class Builder extends ClientSettings.Builder { + protected Builder() throws IOException { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(TextToSpeechStubSettings.newBuilder(clientContext)); + } + + private static Builder createDefault() { + return new Builder(TextToSpeechStubSettings.newBuilder()); + } + + protected Builder(TextToSpeechSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(TextToSpeechStubSettings.Builder stubSettings) { + super(stubSettings); + } + + public TextToSpeechStubSettings.Builder getStubSettingsBuilder() { + return ((TextToSpeechStubSettings.Builder) getStubSettings()); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to listVoices. */ + public UnaryCallSettings.Builder listVoicesSettings() { + return getStubSettingsBuilder().listVoicesSettings(); + } + + /** Returns the builder for the settings used for calls to synthesizeSpeech. */ + public UnaryCallSettings.Builder + synthesizeSpeechSettings() { + return getStubSettingsBuilder().synthesizeSpeechSettings(); + } + + @Override + public TextToSpeechSettings build() throws IOException { + return new TextToSpeechSettings(this); + } + } +} diff --git a/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/package-info.java b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/package-info.java new file mode 100644 index 000000000000..812536497545 --- /dev/null +++ b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/package-info.java @@ -0,0 +1,37 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to Cloud Text-to-Speech API. + * + *

The interfaces provided are listed below, along with usage samples. + * + *

================== TextToSpeechClient ================== + * + *

Service Description: Service that implements Google Cloud Text-to-Speech API. + * + *

Sample for TextToSpeechClient: + * + *

+ * 
+ * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+ *   String languageCode = "";
+ *   ListVoicesResponse response = textToSpeechClient.listVoices(languageCode);
+ * }
+ * 
+ * 
+ */ +package com.google.cloud.texttospeech.v1; diff --git a/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/GrpcTextToSpeechCallableFactory.java b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/GrpcTextToSpeechCallableFactory.java new file mode 100644 index 000000000000..efcd67d7c97f --- /dev/null +++ b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/GrpcTextToSpeechCallableFactory.java @@ -0,0 +1,116 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.texttospeech.v1.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * gRPC callable factory implementation for Cloud Text-to-Speech API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator") +@BetaApi("The surface for use by generated code is not stable yet and may change in the future.") +public class GrpcTextToSpeechCallableFactory implements GrpcStubCallableFactory { + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings pagedCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable( + grpcCallSettings, pagedCallSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings batchingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, batchingCallSettings, clientContext); + } + + @BetaApi( + "The surface for long-running operations is not stable yet and may change in the future.") + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings operationCallSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, operationCallSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } +} diff --git a/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/GrpcTextToSpeechStub.java b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/GrpcTextToSpeechStub.java new file mode 100644 index 000000000000..08e7f70d2239 --- /dev/null +++ b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/GrpcTextToSpeechStub.java @@ -0,0 +1,167 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.texttospeech.v1.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.texttospeech.v1.ListVoicesRequest; +import com.google.cloud.texttospeech.v1.ListVoicesResponse; +import com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest; +import com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * gRPC stub implementation for Cloud Text-to-Speech API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator") +@BetaApi("A restructuring of stub classes is planned, so this may break in the future") +public class GrpcTextToSpeechStub extends TextToSpeechStub { + + private static final MethodDescriptor + listVoicesMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.texttospeech.v1.TextToSpeech/ListVoices") + .setRequestMarshaller(ProtoUtils.marshaller(ListVoicesRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ListVoicesResponse.getDefaultInstance())) + .build(); + private static final MethodDescriptor + synthesizeSpeechMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.cloud.texttospeech.v1.TextToSpeech/SynthesizeSpeech") + .setRequestMarshaller( + ProtoUtils.marshaller(SynthesizeSpeechRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(SynthesizeSpeechResponse.getDefaultInstance())) + .build(); + + private final BackgroundResource backgroundResources; + + private final UnaryCallable listVoicesCallable; + private final UnaryCallable + synthesizeSpeechCallable; + + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcTextToSpeechStub create(TextToSpeechStubSettings settings) + throws IOException { + return new GrpcTextToSpeechStub(settings, ClientContext.create(settings)); + } + + public static final GrpcTextToSpeechStub create(ClientContext clientContext) throws IOException { + return new GrpcTextToSpeechStub(TextToSpeechStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcTextToSpeechStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcTextToSpeechStub( + TextToSpeechStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcTextToSpeechStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcTextToSpeechStub(TextToSpeechStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcTextToSpeechCallableFactory()); + } + + /** + * Constructs an instance of GrpcTextToSpeechStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. 
+ */ + protected GrpcTextToSpeechStub( + TextToSpeechStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + + GrpcCallSettings listVoicesTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listVoicesMethodDescriptor) + .build(); + GrpcCallSettings + synthesizeSpeechTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(synthesizeSpeechMethodDescriptor) + .build(); + + this.listVoicesCallable = + callableFactory.createUnaryCallable( + listVoicesTransportSettings, settings.listVoicesSettings(), clientContext); + this.synthesizeSpeechCallable = + callableFactory.createUnaryCallable( + synthesizeSpeechTransportSettings, settings.synthesizeSpeechSettings(), clientContext); + + backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public UnaryCallable listVoicesCallable() { + return listVoicesCallable; + } + + public UnaryCallable + synthesizeSpeechCallable() { + return synthesizeSpeechCallable; + } + + @Override + public final void close() { + shutdown(); + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/TextToSpeechStub.java b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/TextToSpeechStub.java new file mode 100644 index 000000000000..f75175223ea7 --- /dev/null +++ b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/TextToSpeechStub.java @@ -0,0 +1,48 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.texttospeech.v1.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.texttospeech.v1.ListVoicesRequest; +import com.google.cloud.texttospeech.v1.ListVoicesResponse; +import com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest; +import com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Base stub class for Cloud Text-to-Speech API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator") +@BetaApi("A restructuring of stub classes is planned, so this may break in the future") +public abstract class TextToSpeechStub implements BackgroundResource { + + public UnaryCallable listVoicesCallable() { + throw new UnsupportedOperationException("Not implemented: listVoicesCallable()"); + } + + public UnaryCallable + synthesizeSpeechCallable() { + throw new UnsupportedOperationException("Not implemented: synthesizeSpeechCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/TextToSpeechStubSettings.java b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/TextToSpeechStubSettings.java new file mode 100644 index 000000000000..932079d562c0 --- /dev/null +++ b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/TextToSpeechStubSettings.java @@ -0,0 +1,294 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.texttospeech.v1.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.texttospeech.v1.ListVoicesRequest; +import com.google.cloud.texttospeech.v1.ListVoicesResponse; +import com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest; +import com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; +import org.threeten.bp.Duration; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Settings class to configure an instance of {@link TextToSpeechStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (texttospeech.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. For + * example, to set the total timeout of listVoices to 30 seconds: + * + *

+ * 
+ * TextToSpeechStubSettings.Builder textToSpeechSettingsBuilder =
+ *     TextToSpeechStubSettings.newBuilder();
+ * textToSpeechSettingsBuilder.listVoicesSettings().setRetrySettings(
+ *     textToSpeechSettingsBuilder.listVoicesSettings().getRetrySettings().toBuilder()
+ *         .setTotalTimeout(Duration.ofSeconds(30))
+ *         .build());
+ * TextToSpeechStubSettings textToSpeechSettings = textToSpeechSettingsBuilder.build();
+ * 
+ * 
+ */ +@Generated("by gapic-generator") +@BetaApi +public class TextToSpeechStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder().add("https://www.googleapis.com/auth/cloud-platform").build(); + + private final UnaryCallSettings listVoicesSettings; + private final UnaryCallSettings + synthesizeSpeechSettings; + + /** Returns the object with the settings used for calls to listVoices. */ + public UnaryCallSettings listVoicesSettings() { + return listVoicesSettings; + } + + /** Returns the object with the settings used for calls to synthesizeSpeech. */ + public UnaryCallSettings + synthesizeSpeechSettings() { + return synthesizeSpeechSettings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public TextToSpeechStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcTextToSpeechStub.create(this); + } else { + throw new UnsupportedOperationException( + "Transport not supported: " + getTransportChannelProvider().getTransportName()); + } + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return "texttospeech.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder().setScopesToApply(DEFAULT_SERVICE_SCOPES); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(TextToSpeechStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected TextToSpeechStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + listVoicesSettings = settingsBuilder.listVoicesSettings().build(); + synthesizeSpeechSettings = settingsBuilder.synthesizeSpeechSettings().build(); + } + + /** Builder for TextToSpeechStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + + private final UnaryCallSettings.Builder + listVoicesSettings; + private final UnaryCallSettings.Builder + synthesizeSpeechSettings; + + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "idempotent", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + definitions.put("non_idempotent", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelay(Duration.ofMillis(60000L)) + .setInitialRpcTimeout(Duration.ofMillis(20000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofMillis(20000L)) + .setTotalTimeout(Duration.ofMillis(600000L)) + .build(); + definitions.put("default", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + listVoicesSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + synthesizeSpeechSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + listVoicesSettings, synthesizeSpeechSettings); + + initDefaults(this); + } + + private static Builder createDefault() { + Builder builder = new Builder((ClientContext) null); + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setEndpoint(getDefaultEndpoint()); + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + + builder + .listVoicesSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .synthesizeSpeechSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + return builder; + } + + protected Builder(TextToSpeechStubSettings settings) { + super(settings); + + listVoicesSettings = settings.listVoicesSettings.toBuilder(); + synthesizeSpeechSettings = settings.synthesizeSpeechSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + listVoicesSettings, synthesizeSpeechSettings); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to listVoices. */ + public UnaryCallSettings.Builder listVoicesSettings() { + return listVoicesSettings; + } + + /** Returns the builder for the settings used for calls to synthesizeSpeech. */ + public UnaryCallSettings.Builder + synthesizeSpeechSettings() { + return synthesizeSpeechSettings; + } + + @Override + public TextToSpeechStubSettings build() throws IOException { + return new TextToSpeechStubSettings(this); + } + } +} diff --git a/google-cloud-clients/google-cloud-texttospeech/src/test/java/com/google/cloud/texttospeech/v1/MockTextToSpeech.java b/google-cloud-clients/google-cloud-texttospeech/src/test/java/com/google/cloud/texttospeech/v1/MockTextToSpeech.java new file mode 100644 index 000000000000..6b311dc30c86 --- /dev/null +++ b/google-cloud-clients/google-cloud-texttospeech/src/test/java/com/google/cloud/texttospeech/v1/MockTextToSpeech.java @@ -0,0 +1,57 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.texttospeech.v1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.GeneratedMessageV3; +import io.grpc.ServerServiceDefinition; +import java.util.List; + +@javax.annotation.Generated("by GAPIC") +@BetaApi +public class MockTextToSpeech implements MockGrpcService { + private final MockTextToSpeechImpl serviceImpl; + + public MockTextToSpeech() { + serviceImpl = new MockTextToSpeechImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(GeneratedMessageV3 response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/google-cloud-clients/google-cloud-texttospeech/src/test/java/com/google/cloud/texttospeech/v1/MockTextToSpeechImpl.java b/google-cloud-clients/google-cloud-texttospeech/src/test/java/com/google/cloud/texttospeech/v1/MockTextToSpeechImpl.java new file mode 100644 index 000000000000..419597e9bdcb --- /dev/null +++ b/google-cloud-clients/google-cloud-texttospeech/src/test/java/com/google/cloud/texttospeech/v1/MockTextToSpeechImpl.java @@ -0,0 +1,88 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.texttospeech.v1; + +import com.google.api.core.BetaApi; +import com.google.cloud.texttospeech.v1.TextToSpeechGrpc.TextToSpeechImplBase; +import com.google.protobuf.GeneratedMessageV3; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; + +@javax.annotation.Generated("by GAPIC") +@BetaApi +public class MockTextToSpeechImpl extends TextToSpeechImplBase { + private ArrayList requests; + private Queue responses; + + public MockTextToSpeechImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(GeneratedMessageV3 response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void listVoices( + ListVoicesRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof ListVoicesResponse) { + requests.add(request); + responseObserver.onNext((ListVoicesResponse) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void synthesizeSpeech( + SynthesizeSpeechRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof SynthesizeSpeechResponse) { + requests.add(request); + responseObserver.onNext((SynthesizeSpeechResponse) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } +} diff --git a/google-cloud-clients/google-cloud-texttospeech/src/test/java/com/google/cloud/texttospeech/v1/TextToSpeechClientTest.java b/google-cloud-clients/google-cloud-texttospeech/src/test/java/com/google/cloud/texttospeech/v1/TextToSpeechClientTest.java new file mode 100644 index 000000000000..6a133ee21e21 --- /dev/null +++ b/google-cloud-clients/google-cloud-texttospeech/src/test/java/com/google/cloud/texttospeech/v1/TextToSpeechClientTest.java @@ -0,0 +1,159 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.texttospeech.v1; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.protobuf.ByteString; +import com.google.protobuf.GeneratedMessageV3; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@javax.annotation.Generated("by GAPIC") +public class TextToSpeechClientTest { + private static MockTextToSpeech mockTextToSpeech; + private static MockServiceHelper serviceHelper; + private TextToSpeechClient client; + private LocalChannelProvider channelProvider; + + @BeforeClass + public static void startStaticServer() { + mockTextToSpeech = new MockTextToSpeech(); + serviceHelper = + new MockServiceHelper("in-process-1", Arrays.asList(mockTextToSpeech)); + serviceHelper.start(); + } + + @AfterClass + public static void stopServer() { + serviceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + serviceHelper.reset(); + channelProvider = serviceHelper.createChannelProvider(); + TextToSpeechSettings settings = + TextToSpeechSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = TextToSpeechClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + @SuppressWarnings("all") + public void listVoicesTest() { + ListVoicesResponse expectedResponse = ListVoicesResponse.newBuilder().build(); + mockTextToSpeech.addResponse(expectedResponse); + + String languageCode = "languageCode-412800396"; + + ListVoicesResponse actualResponse = client.listVoices(languageCode); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockTextToSpeech.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListVoicesRequest actualRequest = (ListVoicesRequest) actualRequests.get(0); + + Assert.assertEquals(languageCode, actualRequest.getLanguageCode()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + public void listVoicesExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockTextToSpeech.addException(exception); + + try { + String languageCode = "languageCode-412800396"; + + client.listVoices(languageCode); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception + } + } + + @Test + @SuppressWarnings("all") + public void synthesizeSpeechTest() { + ByteString audioContent = ByteString.copyFromUtf8("16"); + SynthesizeSpeechResponse expectedResponse = + SynthesizeSpeechResponse.newBuilder().setAudioContent(audioContent).build(); + mockTextToSpeech.addResponse(expectedResponse); + + 
SynthesisInput input = SynthesisInput.newBuilder().build(); + VoiceSelectionParams voice = VoiceSelectionParams.newBuilder().build(); + AudioConfig audioConfig = AudioConfig.newBuilder().build(); + + SynthesizeSpeechResponse actualResponse = client.synthesizeSpeech(input, voice, audioConfig); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockTextToSpeech.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + SynthesizeSpeechRequest actualRequest = (SynthesizeSpeechRequest) actualRequests.get(0); + + Assert.assertEquals(input, actualRequest.getInput()); + Assert.assertEquals(voice, actualRequest.getVoice()); + Assert.assertEquals(audioConfig, actualRequest.getAudioConfig()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + public void synthesizeSpeechExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockTextToSpeech.addException(exception); + + try { + SynthesisInput input = SynthesisInput.newBuilder().build(); + VoiceSelectionParams voice = VoiceSelectionParams.newBuilder().build(); + AudioConfig audioConfig = AudioConfig.newBuilder().build(); + + client.synthesizeSpeech(input, voice, audioConfig); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception + } + } +} diff --git a/google-cloud-clients/pom.xml b/google-cloud-clients/pom.xml index 6450bf728591..baf14606a0b2 100644 --- a/google-cloud-clients/pom.xml +++ b/google-cloud-clients/pom.xml @@ -751,7 +751,7 @@ Stub packages - com.google.cloud.bigquerydatatransfer.v1.stub:com.google.cloud.container.v1.stub:com.google.cloud.dlp.v2beta1.stub:com.google.cloud.dlp.v2.stub:com.google.cloud.dialogflow.v2beta1.stub:com.google.cloud.dialogflow.v2.stub:com.google.cloud.errorreporting.v1beta1.stub:com.google.cloud.firestore.v1beta1.stub:com.google.cloud.language.v1beta2.stub:com.google.cloud.language.v1.stub:com.google.cloud.logging.v2.stub:com.google.cloud.monitoring.v3.stub:com.google.cloud.pubsub.v1.stub:com.google.cloud.speech.v1beta1.stub:com.google.cloud.speech.v1.stub:com.google.cloud.texttospeech.v1beta1.stub:com.google.cloud.trace.v1.stub:com.google.cloud.trace.v2.stub:com.google.cloud.videointelligence.v1beta1.stub:com.google.cloud.videointelligence.v1beta2.stub:com.google.cloud.videointelligence.v1.stub:com.google.cloud.videointelligence.v1p1beta1.stub:com.google.cloud.vision.v1.stub:com.google.cloud.vision.v1p1beta1.stub:com.google.cloud.vision.v1p2beta1.stub + 
com.google.cloud.bigquerydatatransfer.v1.stub:com.google.cloud.container.v1.stub:com.google.cloud.dlp.v2beta1.stub:com.google.cloud.dlp.v2.stub:com.google.cloud.dialogflow.v2beta1.stub:com.google.cloud.dialogflow.v2.stub:com.google.cloud.errorreporting.v1beta1.stub:com.google.cloud.firestore.v1beta1.stub:com.google.cloud.language.v1beta2.stub:com.google.cloud.language.v1.stub:com.google.cloud.logging.v2.stub:com.google.cloud.monitoring.v3.stub:com.google.cloud.pubsub.v1.stub:com.google.cloud.speech.v1beta1.stub:com.google.cloud.speech.v1.stub:com.google.cloud.texttospeech.v1beta1.stub:com.google.cloud.texttospeech.v1.stub:com.google.cloud.trace.v1.stub:com.google.cloud.trace.v2.stub:com.google.cloud.videointelligence.v1beta1.stub:com.google.cloud.videointelligence.v1beta2.stub:com.google.cloud.videointelligence.v1.stub:com.google.cloud.videointelligence.v1p1beta1.stub:com.google.cloud.vision.v1.stub:com.google.cloud.vision.v1p1beta1.stub:com.google.cloud.vision.v1p2beta1.stub Deprecated packages