diff --git a/google-api-grpc/grpc-google-cloud-texttospeech-v1/pom.xml b/google-api-grpc/grpc-google-cloud-texttospeech-v1/pom.xml
new file mode 100644
index 000000000000..e19d75119bc2
--- /dev/null
+++ b/google-api-grpc/grpc-google-cloud-texttospeech-v1/pom.xml
@@ -0,0 +1,31 @@
+
+ * Service that implements Google Cloud Text-to-Speech API.
+ * </pre>
+ */
+@javax.annotation.Generated(
+ value = "by gRPC proto compiler (version 1.10.0)",
+ comments = "Source: google/cloud/texttospeech/v1/cloud_tts.proto")
+public final class TextToSpeechGrpc {
+
+ private TextToSpeechGrpc() {}
+
+ public static final String SERVICE_NAME = "google.cloud.texttospeech.v1.TextToSpeech";
+
+ // Static method descriptors that strictly reflect the proto.
+ @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901")
+ @java.lang.Deprecated // Use {@link #getListVoicesMethod()} instead.
+ public static final io.grpc.MethodDescriptor
+ * Service that implements Google Cloud Text-to-Speech API.
+ * </pre>
+ */
+ public static abstract class TextToSpeechImplBase implements io.grpc.BindableService {
+
+ /**
+ * <pre>
+ * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice]
+ * supported for synthesis.
+ * </pre>
+ */
+ public void listVoices(com.google.cloud.texttospeech.v1.ListVoicesRequest request,
+ io.grpc.stub.StreamObserver
+ * Synthesizes speech synchronously: receive results after all text input
+ * has been processed.
+ * </pre>
+ */
+ public void synthesizeSpeech(com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest request,
+ io.grpc.stub.StreamObserver
+ * Service that implements Google Cloud Text-to-Speech API.
+ * </pre>
+ */
+ public static final class TextToSpeechStub extends io.grpc.stub.AbstractStub
+ * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice]
+ * supported for synthesis.
+ * </pre>
+ */
+ public void listVoices(com.google.cloud.texttospeech.v1.ListVoicesRequest request,
+ io.grpc.stub.StreamObserver
+ * Synthesizes speech synchronously: receive results after all text input
+ * has been processed.
+ * </pre>
+ */
+ public void synthesizeSpeech(com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest request,
+ io.grpc.stub.StreamObserver
+ * Service that implements Google Cloud Text-to-Speech API.
+ * </pre>
+ */
+ public static final class TextToSpeechBlockingStub extends io.grpc.stub.AbstractStub
+ * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice]
+ * supported for synthesis.
+ * </pre>
+ */
+ public com.google.cloud.texttospeech.v1.ListVoicesResponse listVoices(com.google.cloud.texttospeech.v1.ListVoicesRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getListVoicesMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ * <pre>
+ * Synthesizes speech synchronously: receive results after all text input
+ * has been processed.
+ * </pre>
+ */
+ public com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse synthesizeSpeech(com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getSynthesizeSpeechMethodHelper(), getCallOptions(), request);
+ }
+ }
+
+ /**
+ * <pre>
+ * Service that implements Google Cloud Text-to-Speech API.
+ * </pre>
+ */
+ public static final class TextToSpeechFutureStub extends io.grpc.stub.AbstractStub
+ * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice]
+ * supported for synthesis.
+ * </pre>
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Synthesizes speech synchronously: receive results after all text input
+ * has been processed.
+ * </pre>
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Description of audio data to be synthesized. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.AudioConfig} + */ +public final class AudioConfig extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.AudioConfig) + AudioConfigOrBuilder { +private static final long serialVersionUID = 0L; + // Use AudioConfig.newBuilder() to construct. + private AudioConfig(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + private AudioConfig() { + audioEncoding_ = 0; + speakingRate_ = 0D; + pitch_ = 0D; + volumeGainDb_ = 0D; + sampleRateHertz_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AudioConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + + audioEncoding_ = rawValue; + break; + } + case 17: { + + speakingRate_ = input.readDouble(); + break; + } + case 25: { + + pitch_ = input.readDouble(); + break; + } + case 33: { + + volumeGainDb_ = input.readDouble(); + break; + } + case 40: { + + sampleRateHertz_ = input.readInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_AudioConfig_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_AudioConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.AudioConfig.class, com.google.cloud.texttospeech.v1.AudioConfig.Builder.class); + } + + public static final int AUDIO_ENCODING_FIELD_NUMBER = 1; + private int audioEncoding_; + /** + *
+ * Required. The format of the requested audio byte stream.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1;</code>
+ */
+ public int getAudioEncodingValue() {
+ return audioEncoding_;
+ }
+ /**
+ * <pre>
+ * Required. The format of the requested audio byte stream.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1;</code>
+ */
+ public com.google.cloud.texttospeech.v1.AudioEncoding getAudioEncoding() {
+ com.google.cloud.texttospeech.v1.AudioEncoding result = com.google.cloud.texttospeech.v1.AudioEncoding.valueOf(audioEncoding_);
+ return result == null ? com.google.cloud.texttospeech.v1.AudioEncoding.UNRECOGNIZED : result;
+ }
+
+ public static final int SPEAKING_RATE_FIELD_NUMBER = 2;
+ private double speakingRate_;
+ /**
+ * <pre>
+ * Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
+ * native speed supported by the specific voice. 2.0 is twice as fast, and
+ * 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 speed. Any
+ * other values < 0.25 or > 4.0 will return an error.
+ * </pre>
+ *
+ * <code>double speaking_rate = 2;</code>
+ */
+ public double getSpeakingRate() {
+ return speakingRate_;
+ }
+
+ public static final int PITCH_FIELD_NUMBER = 3;
+ private double pitch_;
+ /**
+ * <pre>
+ * Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
+ * semitones from the original pitch. -20 means decrease 20 semitones from the
+ * original pitch.
+ * </pre>
+ *
+ * <code>double pitch = 3;</code>
+ */
+ public double getPitch() {
+ return pitch_;
+ }
+
+ public static final int VOLUME_GAIN_DB_FIELD_NUMBER = 4;
+ private double volumeGainDb_;
+ /**
+ * <pre>
+ * Optional volume gain (in dB) of the normal native volume supported by the
+ * specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
+ * 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
+ * will play at approximately half the amplitude of the normal native signal
+ * amplitude. A value of +6.0 (dB) will play at approximately twice the
+ * amplitude of the normal native signal amplitude. Strongly recommend not to
+ * exceed +10 (dB) as there's usually no effective increase in loudness for
+ * any value greater than that.
+ * </pre>
+ *
+ * <code>double volume_gain_db = 4;</code>
+ */
+ public double getVolumeGainDb() {
+ return volumeGainDb_;
+ }
+
+ public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 5;
+ private int sampleRateHertz_;
+ /**
+ * <pre>
+ * The synthesis sample rate (in hertz) for this audio. Optional. If this is
+ * different from the voice's natural sample rate, then the synthesizer will
+ * honor this request by converting to the desired sample rate (which might
+ * result in worse audio quality), unless the specified sample rate is not
+ * supported for the encoding chosen, in which case it will fail the request
+ * and return [google.rpc.Code.INVALID_ARGUMENT][].
+ * </pre>
+ *
+ * <code>int32 sample_rate_hertz = 5;</code>
+ */
+ public int getSampleRateHertz() {
+ return sampleRateHertz_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (audioEncoding_ != com.google.cloud.texttospeech.v1.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED.getNumber()) {
+ output.writeEnum(1, audioEncoding_);
+ }
+ if (speakingRate_ != 0D) {
+ output.writeDouble(2, speakingRate_);
+ }
+ if (pitch_ != 0D) {
+ output.writeDouble(3, pitch_);
+ }
+ if (volumeGainDb_ != 0D) {
+ output.writeDouble(4, volumeGainDb_);
+ }
+ if (sampleRateHertz_ != 0) {
+ output.writeInt32(5, sampleRateHertz_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (audioEncoding_ != com.google.cloud.texttospeech.v1.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED.getNumber()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(1, audioEncoding_);
+ }
+ if (speakingRate_ != 0D) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(2, speakingRate_);
+ }
+ if (pitch_ != 0D) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(3, pitch_);
+ }
+ if (volumeGainDb_ != 0D) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(4, volumeGainDb_);
+ }
+ if (sampleRateHertz_ != 0) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(5, sampleRateHertz_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.texttospeech.v1.AudioConfig)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.texttospeech.v1.AudioConfig other = (com.google.cloud.texttospeech.v1.AudioConfig) obj;
+
+ boolean result = true;
+ result = result && audioEncoding_ == other.audioEncoding_;
+ result = result && (
+ java.lang.Double.doubleToLongBits(getSpeakingRate())
+ == java.lang.Double.doubleToLongBits(
+ other.getSpeakingRate()));
+ result = result && (
+ java.lang.Double.doubleToLongBits(getPitch())
+ == java.lang.Double.doubleToLongBits(
+ other.getPitch()));
+ result = result && (
+ java.lang.Double.doubleToLongBits(getVolumeGainDb())
+ == java.lang.Double.doubleToLongBits(
+ other.getVolumeGainDb()));
+ result = result && (getSampleRateHertz()
+ == other.getSampleRateHertz());
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + AUDIO_ENCODING_FIELD_NUMBER;
+ hash = (53 * hash) + audioEncoding_;
+ hash = (37 * hash) + SPEAKING_RATE_FIELD_NUMBER;
+ hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
+ java.lang.Double.doubleToLongBits(getSpeakingRate()));
+ hash = (37 * hash) + PITCH_FIELD_NUMBER;
+ hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
+ java.lang.Double.doubleToLongBits(getPitch()));
+ hash = (37 * hash) + VOLUME_GAIN_DB_FIELD_NUMBER;
+ hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
+ java.lang.Double.doubleToLongBits(getVolumeGainDb()));
+ hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER;
+ hash = (53 * hash) + getSampleRateHertz();
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom(
+ java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom(
+ java.nio.ByteBuffer data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.AudioConfig parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.AudioConfig parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.AudioConfig parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(com.google.cloud.texttospeech.v1.AudioConfig prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Description of audio data to be synthesized.
+ * </pre>
+ *
+ * Protobuf type {@code google.cloud.texttospeech.v1.AudioConfig}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessageV3.Builder
+ * Required. The format of the requested audio byte stream.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1;</code>
+ */
+ public int getAudioEncodingValue() {
+ return audioEncoding_;
+ }
+ /**
+ * <pre>
+ * Required. The format of the requested audio byte stream.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1;</code>
+ */
+ public Builder setAudioEncodingValue(int value) {
+ audioEncoding_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The format of the requested audio byte stream.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1;</code>
+ */
+ public com.google.cloud.texttospeech.v1.AudioEncoding getAudioEncoding() {
+ com.google.cloud.texttospeech.v1.AudioEncoding result = com.google.cloud.texttospeech.v1.AudioEncoding.valueOf(audioEncoding_);
+ return result == null ? com.google.cloud.texttospeech.v1.AudioEncoding.UNRECOGNIZED : result;
+ }
+ /**
+ * <pre>
+ * Required. The format of the requested audio byte stream.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1;</code>
+ */
+ public Builder setAudioEncoding(com.google.cloud.texttospeech.v1.AudioEncoding value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ audioEncoding_ = value.getNumber();
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The format of the requested audio byte stream.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1;</code>
+ */
+ public Builder clearAudioEncoding() {
+
+ audioEncoding_ = 0;
+ onChanged();
+ return this;
+ }
+
+ private double speakingRate_ ;
+ /**
+ * <pre>
+ * Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
+ * native speed supported by the specific voice. 2.0 is twice as fast, and
+ * 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 speed. Any
+ * other values < 0.25 or > 4.0 will return an error.
+ * </pre>
+ *
+ * <code>double speaking_rate = 2;</code>
+ */
+ public double getSpeakingRate() {
+ return speakingRate_;
+ }
+ /**
+ * <pre>
+ * Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
+ * native speed supported by the specific voice. 2.0 is twice as fast, and
+ * 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 speed. Any
+ * other values < 0.25 or > 4.0 will return an error.
+ * </pre>
+ *
+ * <code>double speaking_rate = 2;</code>
+ */
+ public Builder setSpeakingRate(double value) {
+
+ speakingRate_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
+ * native speed supported by the specific voice. 2.0 is twice as fast, and
+ * 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 speed. Any
+ * other values < 0.25 or > 4.0 will return an error.
+ * </pre>
+ *
+ * <code>double speaking_rate = 2;</code>
+ */
+ public Builder clearSpeakingRate() {
+
+ speakingRate_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ private double pitch_ ;
+ /**
+ * <pre>
+ * Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
+ * semitones from the original pitch. -20 means decrease 20 semitones from the
+ * original pitch.
+ * </pre>
+ *
+ * <code>double pitch = 3;</code>
+ */
+ public double getPitch() {
+ return pitch_;
+ }
+ /**
+ * <pre>
+ * Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
+ * semitones from the original pitch. -20 means decrease 20 semitones from the
+ * original pitch.
+ * </pre>
+ *
+ * <code>double pitch = 3;</code>
+ */
+ public Builder setPitch(double value) {
+
+ pitch_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
+ * semitones from the original pitch. -20 means decrease 20 semitones from the
+ * original pitch.
+ * </pre>
+ *
+ * <code>double pitch = 3;</code>
+ */
+ public Builder clearPitch() {
+
+ pitch_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ private double volumeGainDb_ ;
+ /**
+ * <pre>
+ * Optional volume gain (in dB) of the normal native volume supported by the
+ * specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
+ * 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
+ * will play at approximately half the amplitude of the normal native signal
+ * amplitude. A value of +6.0 (dB) will play at approximately twice the
+ * amplitude of the normal native signal amplitude. Strongly recommend not to
+ * exceed +10 (dB) as there's usually no effective increase in loudness for
+ * any value greater than that.
+ * </pre>
+ *
+ * <code>double volume_gain_db = 4;</code>
+ */
+ public double getVolumeGainDb() {
+ return volumeGainDb_;
+ }
+ /**
+ * <pre>
+ * Optional volume gain (in dB) of the normal native volume supported by the
+ * specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
+ * 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
+ * will play at approximately half the amplitude of the normal native signal
+ * amplitude. A value of +6.0 (dB) will play at approximately twice the
+ * amplitude of the normal native signal amplitude. Strongly recommend not to
+ * exceed +10 (dB) as there's usually no effective increase in loudness for
+ * any value greater than that.
+ * </pre>
+ *
+ * <code>double volume_gain_db = 4;</code>
+ */
+ public Builder setVolumeGainDb(double value) {
+
+ volumeGainDb_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Optional volume gain (in dB) of the normal native volume supported by the
+ * specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
+ * 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
+ * will play at approximately half the amplitude of the normal native signal
+ * amplitude. A value of +6.0 (dB) will play at approximately twice the
+ * amplitude of the normal native signal amplitude. Strongly recommend not to
+ * exceed +10 (dB) as there's usually no effective increase in loudness for
+ * any value greater than that.
+ * </pre>
+ *
+ * <code>double volume_gain_db = 4;</code>
+ */
+ public Builder clearVolumeGainDb() {
+
+ volumeGainDb_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ private int sampleRateHertz_ ;
+ /**
+ * <pre>
+ * The synthesis sample rate (in hertz) for this audio. Optional. If this is
+ * different from the voice's natural sample rate, then the synthesizer will
+ * honor this request by converting to the desired sample rate (which might
+ * result in worse audio quality), unless the specified sample rate is not
+ * supported for the encoding chosen, in which case it will fail the request
+ * and return [google.rpc.Code.INVALID_ARGUMENT][].
+ * </pre>
+ *
+ * <code>int32 sample_rate_hertz = 5;</code>
+ */
+ public int getSampleRateHertz() {
+ return sampleRateHertz_;
+ }
+ /**
+ * <pre>
+ * The synthesis sample rate (in hertz) for this audio. Optional. If this is
+ * different from the voice's natural sample rate, then the synthesizer will
+ * honor this request by converting to the desired sample rate (which might
+ * result in worse audio quality), unless the specified sample rate is not
+ * supported for the encoding chosen, in which case it will fail the request
+ * and return [google.rpc.Code.INVALID_ARGUMENT][].
+ * </pre>
+ *
+ * <code>int32 sample_rate_hertz = 5;</code>
+ */
+ public Builder setSampleRateHertz(int value) {
+
+ sampleRateHertz_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The synthesis sample rate (in hertz) for this audio. Optional. If this is
+ * different from the voice's natural sample rate, then the synthesizer will
+ * honor this request by converting to the desired sample rate (which might
+ * result in worse audio quality), unless the specified sample rate is not
+ * supported for the encoding chosen, in which case it will fail the request
+ * and return [google.rpc.Code.INVALID_ARGUMENT][].
+ * </pre>
+ *
+ * <code>int32 sample_rate_hertz = 5;</code>
+ */
+ public Builder clearSampleRateHertz() {
+
+ sampleRateHertz_ = 0;
+ onChanged();
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.AudioConfig)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.AudioConfig)
+ private static final com.google.cloud.texttospeech.v1.AudioConfig DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.AudioConfig();
+ }
+
+ public static com.google.cloud.texttospeech.v1.AudioConfig getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser
+ * Required. The format of the requested audio byte stream.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1;</code>
+ */
+ int getAudioEncodingValue();
+ /**
+ * <pre>
+ * Required. The format of the requested audio byte stream.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.AudioEncoding audio_encoding = 1;</code>
+ */
+ com.google.cloud.texttospeech.v1.AudioEncoding getAudioEncoding();
+
+ /**
+ * <pre>
+ * Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
+ * native speed supported by the specific voice. 2.0 is twice as fast, and
+ * 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 speed. Any
+ * other values < 0.25 or > 4.0 will return an error.
+ * </pre>
+ *
+ * <code>double speaking_rate = 2;</code>
+ */
+ double getSpeakingRate();
+
+ /**
+ * <pre>
+ * Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
+ * semitones from the original pitch. -20 means decrease 20 semitones from the
+ * original pitch.
+ * </pre>
+ *
+ * <code>double pitch = 3;</code>
+ */
+ double getPitch();
+
+ /**
+ * <pre>
+ * Optional volume gain (in dB) of the normal native volume supported by the
+ * specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
+ * 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
+ * will play at approximately half the amplitude of the normal native signal
+ * amplitude. A value of +6.0 (dB) will play at approximately twice the
+ * amplitude of the normal native signal amplitude. Strongly recommend not to
+ * exceed +10 (dB) as there's usually no effective increase in loudness for
+ * any value greater than that.
+ * </pre>
+ *
+ * <code>double volume_gain_db = 4;</code>
+ */
+ double getVolumeGainDb();
+
+ /**
+ * <pre>
+ * The synthesis sample rate (in hertz) for this audio. Optional. If this is
+ * different from the voice's natural sample rate, then the synthesizer will
+ * honor this request by converting to the desired sample rate (which might
+ * result in worse audio quality), unless the specified sample rate is not
+ * supported for the encoding chosen, in which case it will fail the request
+ * and return [google.rpc.Code.INVALID_ARGUMENT][].
+ * </pre>
+ *
+ * <code>int32 sample_rate_hertz = 5;</code>
+ */
+ int getSampleRateHertz();
+}
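
A minimal usage sketch of the AudioConfig message generated above (not part of the diff itself): it only calls the generated builder methods shown in AudioConfig.java plus the standard generated build() method, and the field values are illustrative.

import com.google.cloud.texttospeech.v1.AudioConfig;
import com.google.cloud.texttospeech.v1.AudioEncoding;

public class AudioConfigExample {
  public static void main(String[] args) {
    // All setters below are generated Builder methods documented in the file above.
    AudioConfig config = AudioConfig.newBuilder()
        .setAudioEncoding(AudioEncoding.LINEAR16) // required output format
        .setSpeakingRate(1.0)       // allowed range [0.25, 4.0]; 0.0 falls back to the native rate
        .setPitch(0.0)              // semitones, allowed range [-20.0, 20.0]
        .setVolumeGainDb(0.0)       // dB gain, allowed range [-96.0, 16.0]
        .setSampleRateHertz(16000)  // optional; the server may resample or reject per encoding
        .build();
    System.out.println(config);
  }
}
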
diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/AudioEncoding.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/AudioEncoding.java
new file mode 100644
index 000000000000..adbeb00b61bf
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/AudioEncoding.java
@@ -0,0 +1,166 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/texttospeech/v1/cloud_tts.proto
+
+package com.google.cloud.texttospeech.v1;
+
+/**
+ * <pre>
+ * Configuration to set up audio encoder. The encoding determines the output
+ * audio format that we'd like.
+ * </pre>
+ *
+ * Protobuf enum {@code google.cloud.texttospeech.v1.AudioEncoding}
+ */
+public enum AudioEncoding
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <pre>
+ * Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
+ * </pre>
+ *
+ * <code>AUDIO_ENCODING_UNSPECIFIED = 0;</code>
+ */
+ AUDIO_ENCODING_UNSPECIFIED(0),
+ /**
+ * <pre>
+ * Uncompressed 16-bit signed little-endian samples (Linear PCM).
+ * Audio content returned as LINEAR16 also contains a WAV header.
+ * </pre>
+ *
+ * <code>LINEAR16 = 1;</code>
+ */
+ LINEAR16(1),
+ /**
+ * <pre>
+ * MP3 audio.
+ * </pre>
+ *
+ * <code>MP3 = 2;</code>
+ */
+ MP3(2),
+ /**
+ * <pre>
+ * Opus encoded audio wrapped in an ogg container. The result will be a
+ * file which can be played natively on Android, and in browsers (at least
+ * Chrome and Firefox). The quality of the encoding is considerably higher
+ * than MP3 while using approximately the same bitrate.
+ * </pre>
+ *
+ * <code>OGG_OPUS = 3;</code>
+ */
+ OGG_OPUS(3),
+ UNRECOGNIZED(-1),
+ ;
+
+ /**
+ * <pre>
+ * Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
+ * </pre>
+ *
+ * <code>AUDIO_ENCODING_UNSPECIFIED = 0;</code>
+ */
+ public static final int AUDIO_ENCODING_UNSPECIFIED_VALUE = 0;
+ /**
+ * <pre>
+ * Uncompressed 16-bit signed little-endian samples (Linear PCM).
+ * Audio content returned as LINEAR16 also contains a WAV header.
+ * </pre>
+ *
+ * <code>LINEAR16 = 1;</code>
+ */
+ public static final int LINEAR16_VALUE = 1;
+ /**
+ * <pre>
+ * MP3 audio.
+ * </pre>
+ *
+ * <code>MP3 = 2;</code>
+ */
+ public static final int MP3_VALUE = 2;
+ /**
+ * <pre>
+ * Opus encoded audio wrapped in an ogg container. The result will be a
+ * file which can be played natively on Android, and in browsers (at least
+ * Chrome and Firefox). The quality of the encoding is considerably higher
+ * than MP3 while using approximately the same bitrate.
+ * </pre>
+ *
+ * <code>OGG_OPUS = 3;</code>
+ */
+ public static final int OGG_OPUS_VALUE = 3;
+
+
+ public final int getNumber() {
+ if (this == UNRECOGNIZED) {
+ throw new java.lang.IllegalArgumentException(
+ "Can't get the number of an unknown enum value.");
+ }
+ return value;
+ }
+
+ /**
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+ @java.lang.Deprecated
+ public static AudioEncoding valueOf(int value) {
+ return forNumber(value);
+ }
+
+ public static AudioEncoding forNumber(int value) {
+ switch (value) {
+ case 0: return AUDIO_ENCODING_UNSPECIFIED;
+ case 1: return LINEAR16;
+ case 2: return MP3;
+ case 3: return OGG_OPUS;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap+ * The top-level message sent by the client for the `ListVoices` method. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.ListVoicesRequest} + */ +public final class ListVoicesRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.ListVoicesRequest) + ListVoicesRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use ListVoicesRequest.newBuilder() to construct. + private ListVoicesRequest(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + private ListVoicesRequest() { + languageCode_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListVoicesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + languageCode_ = s; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.ListVoicesRequest.class, com.google.cloud.texttospeech.v1.ListVoicesRequest.Builder.class); + } + + public static final int LANGUAGE_CODE_FIELD_NUMBER = 1; + private volatile java.lang.Object languageCode_; + /** + *
+ * Optional (but recommended)
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+ * specified, the ListVoices call will only return voices that can be used to
+ * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+ * supported "en-*" voices; when specifying "no", you will get supported
+ * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+ * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+ * supported "yue-*" voices.
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public java.lang.String getLanguageCode() {
+ java.lang.Object ref = languageCode_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ languageCode_ = s;
+ return s;
+ }
+ }
+ /**
+ * <pre>
+ * Optional (but recommended)
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+ * specified, the ListVoices call will only return voices that can be used to
+ * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+ * supported "en-*" voices; when specifying "no", you will get supported
+ * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+ * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+ * supported "yue-*" voices.
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getLanguageCodeBytes() {
+ java.lang.Object ref = languageCode_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ languageCode_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (!getLanguageCodeBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageCode_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (!getLanguageCodeBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, languageCode_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.texttospeech.v1.ListVoicesRequest)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.texttospeech.v1.ListVoicesRequest other = (com.google.cloud.texttospeech.v1.ListVoicesRequest) obj;
+
+ boolean result = true;
+ result = result && getLanguageCode()
+ .equals(other.getLanguageCode());
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER;
+ hash = (53 * hash) + getLanguageCode().hashCode();
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom(
+ java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom(
+ java.nio.ByteBuffer data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(com.google.cloud.texttospeech.v1.ListVoicesRequest prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * + * The top-level message sent by the client for the `ListVoices` method. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.ListVoicesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder
+ * Optional (but recommended)
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+ * specified, the ListVoices call will only return voices that can be used to
+ * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+ * supported "en-*" voices; when specifying "no", you will get supported
+ * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+ * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+ * supported "yue-*" voices.
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public java.lang.String getLanguageCode() {
+ java.lang.Object ref = languageCode_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ languageCode_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <pre>
+ * Optional (but recommended)
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+ * specified, the ListVoices call will only return voices that can be used to
+ * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+ * supported "en-*" voices; when specifying "no", you will get supported
+ * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+ * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+ * supported "yue-*" voices.
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getLanguageCodeBytes() {
+ java.lang.Object ref = languageCode_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ languageCode_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <pre>
+ * Optional (but recommended)
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+ * specified, the ListVoices call will only return voices that can be used to
+ * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+ * supported "en-*" voices; when specifying "no", you will get supported
+ * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+ * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+ * supported "yue-*" voices.
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public Builder setLanguageCode(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ languageCode_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Optional (but recommended)
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+ * specified, the ListVoices call will only return voices that can be used to
+ * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+ * supported "en-*" voices; when specifying "no", you will get supported
+ * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+ * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+ * supported "yue-*" voices.
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public Builder clearLanguageCode() {
+
+ languageCode_ = getDefaultInstance().getLanguageCode();
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Optional (but recommended)
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+ * specified, the ListVoices call will only return voices that can be used to
+ * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+ * supported "en-*" voices; when specifying "no", you will get supported
+ * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+ * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+ * supported "yue-*" voices.
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public Builder setLanguageCodeBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ languageCode_ = value;
+ onChanged();
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.ListVoicesRequest)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.ListVoicesRequest)
+ private static final com.google.cloud.texttospeech.v1.ListVoicesRequest DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.ListVoicesRequest();
+ }
+
+ public static com.google.cloud.texttospeech.v1.ListVoicesRequest getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser
+ * Optional (but recommended)
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+ * specified, the ListVoices call will only return voices that can be used to
+ * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+ * supported "en-*" voices; when specifying "no", you will get supported
+ * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+ * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+ * supported "yue-*" voices.
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ java.lang.String getLanguageCode();
+ /**
+ * <pre>
+ * Optional (but recommended)
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+ * specified, the ListVoices call will only return voices that can be used to
+ * synthesize this language_code. E.g. when specifying "en-NZ", you will get
+ * supported "en-*" voices; when specifying "no", you will get supported
+ * "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+ * will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+ * supported "yue-*" voices.
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getLanguageCodeBytes();
+}
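
A short sketch of how the ListVoicesRequest generated above pairs with the blocking stub from TextToSpeechGrpc earlier in this diff (again, not part of the generated sources). It assumes a pre-configured TextToSpeechBlockingStub, e.g. one obtained from the standard generated newBlockingStub(channel) factory with appropriate Google credentials; that wiring is omitted here.

import com.google.cloud.texttospeech.v1.ListVoicesRequest;
import com.google.cloud.texttospeech.v1.ListVoicesResponse;
import com.google.cloud.texttospeech.v1.TextToSpeechGrpc;

public class ListVoicesExample {
  // Lists every voice usable for Norwegian; "no" also matches "nb-*" (Bokmal)
  // voices, per the language_code Javadoc above.
  static ListVoicesResponse listNorwegianVoices(
      TextToSpeechGrpc.TextToSpeechBlockingStub stub) {
    ListVoicesRequest request = ListVoicesRequest.newBuilder()
        .setLanguageCode("no")
        .build();
    return stub.listVoices(request);
  }
}
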
diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesResponse.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesResponse.java
new file mode 100644
index 000000000000..cecca6db7d9b
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesResponse.java
@@ -0,0 +1,834 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/texttospeech/v1/cloud_tts.proto
+
+package com.google.cloud.texttospeech.v1;
+
+/**
+ * + * The message returned to the client by the `ListVoices` method. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.ListVoicesResponse} + */ +public final class ListVoicesResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.ListVoicesResponse) + ListVoicesResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use ListVoicesResponse.newBuilder() to construct. + private ListVoicesResponse(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + private ListVoicesResponse() { + voices_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListVoicesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + voices_ = new java.util.ArrayList
+ * The list of voices.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.texttospeech.v1.Voice voices = 1;</code>
+ */
+ public java.util.List
+ * The list of voices.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.texttospeech.v1.Voice voices = 1;</code>
+ */
+ public java.util.List<? extends com.google.cloud.texttospeech.v1.VoiceOrBuilder>
+ getVoicesOrBuilderList() {
+ return voices_;
+ }
+ /**
+ * <pre>
+ * The list of voices.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.texttospeech.v1.Voice voices = 1;</code>
+ */
+ public int getVoicesCount() {
+ return voices_.size();
+ }
+ /**
+ * <pre>
+ * The list of voices.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.texttospeech.v1.Voice voices = 1;</code>
+ */
+ public com.google.cloud.texttospeech.v1.Voice getVoices(int index) {
+ return voices_.get(index);
+ }
+ /**
+ * <pre>
+ * The list of voices.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.texttospeech.v1.Voice voices = 1;</code>
+ */
+ public com.google.cloud.texttospeech.v1.VoiceOrBuilder getVoicesOrBuilder(
+ int index) {
+ return voices_.get(index);
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ for (int i = 0; i < voices_.size(); i++) {
+ output.writeMessage(1, voices_.get(i));
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < voices_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, voices_.get(i));
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.texttospeech.v1.ListVoicesResponse)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.texttospeech.v1.ListVoicesResponse other = (com.google.cloud.texttospeech.v1.ListVoicesResponse) obj;
+
+ boolean result = true;
+ result = result && getVoicesList()
+ .equals(other.getVoicesList());
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (getVoicesCount() > 0) {
+ hash = (37 * hash) + VOICES_FIELD_NUMBER;
+ hash = (53 * hash) + getVoicesList().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom(
+ java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom(
+ java.nio.ByteBuffer data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.ListVoicesResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(com.google.cloud.texttospeech.v1.ListVoicesResponse prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * + * The message returned to the client by the `ListVoices` method. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.ListVoicesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder
+ * The list of voices.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.texttospeech.v1.Voice voices = 1;</code>
+ */
+ public java.util.List
+ * The list of voices.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.texttospeech.v1.Voice voices = 1;</code>
+ */
+ public int getVoicesCount() {
+ if (voicesBuilder_ == null) {
+ return voices_.size();
+ } else {
+ return voicesBuilder_.getCount();
+ }
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public com.google.cloud.texttospeech.v1.Voice getVoices(int index) {
+ if (voicesBuilder_ == null) {
+ return voices_.get(index);
+ } else {
+ return voicesBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public Builder setVoices(
+ int index, com.google.cloud.texttospeech.v1.Voice value) {
+ if (voicesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureVoicesIsMutable();
+ voices_.set(index, value);
+ onChanged();
+ } else {
+ voicesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public Builder setVoices(
+ int index, com.google.cloud.texttospeech.v1.Voice.Builder builderForValue) {
+ if (voicesBuilder_ == null) {
+ ensureVoicesIsMutable();
+ voices_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ voicesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public Builder addVoices(com.google.cloud.texttospeech.v1.Voice value) {
+ if (voicesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureVoicesIsMutable();
+ voices_.add(value);
+ onChanged();
+ } else {
+ voicesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public Builder addVoices(
+ int index, com.google.cloud.texttospeech.v1.Voice value) {
+ if (voicesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureVoicesIsMutable();
+ voices_.add(index, value);
+ onChanged();
+ } else {
+ voicesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public Builder addVoices(
+ com.google.cloud.texttospeech.v1.Voice.Builder builderForValue) {
+ if (voicesBuilder_ == null) {
+ ensureVoicesIsMutable();
+ voices_.add(builderForValue.build());
+ onChanged();
+ } else {
+ voicesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public Builder addVoices(
+ int index, com.google.cloud.texttospeech.v1.Voice.Builder builderForValue) {
+ if (voicesBuilder_ == null) {
+ ensureVoicesIsMutable();
+ voices_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ voicesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public Builder addAllVoices(
+ java.lang.Iterable extends com.google.cloud.texttospeech.v1.Voice> values) {
+ if (voicesBuilder_ == null) {
+ ensureVoicesIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(
+ values, voices_);
+ onChanged();
+ } else {
+ voicesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public Builder clearVoices() {
+ if (voicesBuilder_ == null) {
+ voices_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ voicesBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public Builder removeVoices(int index) {
+ if (voicesBuilder_ == null) {
+ ensureVoicesIsMutable();
+ voices_.remove(index);
+ onChanged();
+ } else {
+ voicesBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public com.google.cloud.texttospeech.v1.Voice.Builder getVoicesBuilder(
+ int index) {
+ return getVoicesFieldBuilder().getBuilder(index);
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public com.google.cloud.texttospeech.v1.VoiceOrBuilder getVoicesOrBuilder(
+ int index) {
+ if (voicesBuilder_ == null) {
+ return voices_.get(index); } else {
+ return voicesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public java.util.List extends com.google.cloud.texttospeech.v1.VoiceOrBuilder>
+ getVoicesOrBuilderList() {
+ if (voicesBuilder_ != null) {
+ return voicesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(voices_);
+ }
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public com.google.cloud.texttospeech.v1.Voice.Builder addVoicesBuilder() {
+ return getVoicesFieldBuilder().addBuilder(
+ com.google.cloud.texttospeech.v1.Voice.getDefaultInstance());
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public com.google.cloud.texttospeech.v1.Voice.Builder addVoicesBuilder(
+ int index) {
+ return getVoicesFieldBuilder().addBuilder(
+ index, com.google.cloud.texttospeech.v1.Voice.getDefaultInstance());
+ }
+ /**
+ * + * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ public java.util.List+ * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ java.util.List+ * The list of voices. + *+ * + *
repeated .google.cloud.texttospeech.v1.Voice voices = 1;
+ */
+ com.google.cloud.texttospeech.v1.Voice getVoices(int index);
+ /**
+   * <pre>
+   * The list of voices.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.texttospeech.v1.Voice voices = 1;</code>
+ */
+ int getVoicesCount();
+ /**
+   * <pre>
+   * The list of voices.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.texttospeech.v1.Voice voices = 1;</code>
+ */
+ java.util.List extends com.google.cloud.texttospeech.v1.VoiceOrBuilder>
+ getVoicesOrBuilderList();
+ /**
+   * <pre>
+   * The list of voices.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.texttospeech.v1.Voice voices = 1;</code>
+ */
+ com.google.cloud.texttospeech.v1.VoiceOrBuilder getVoicesOrBuilder(
+ int index);
+}
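
A minimal usage sketch of the ListVoicesResponse builder and accessors generated above, as a unit test or fake server might use them. It is not part of the generated diff: the voice name, language code, and sample rate are illustrative placeholders, and the Voice accessors (setName, addLanguageCodes, setSsmlGender, setNaturalSampleRateHertz) are assumed to come from the Voice.java file generated elsewhere in this change.

import com.google.cloud.texttospeech.v1.ListVoicesResponse;
import com.google.cloud.texttospeech.v1.SsmlVoiceGender;
import com.google.cloud.texttospeech.v1.Voice;

public class ListVoicesResponseSketch {
  public static void main(String[] args) {
    // Assemble a response by hand, as a fake server or a test fixture might.
    ListVoicesResponse response =
        ListVoicesResponse.newBuilder()
            .addVoices(
                Voice.newBuilder()
                    .setName("en-US-Standard-A")           // placeholder voice name
                    .addLanguageCodes("en-US")             // placeholder language code
                    .setSsmlGender(SsmlVoiceGender.FEMALE)
                    .setNaturalSampleRateHertz(24000))     // placeholder sample rate
            .build();

    // Read it back through the generated accessors.
    System.out.println("total voices: " + response.getVoicesCount());
    for (Voice voice : response.getVoicesList()) {
      System.out.println(voice.getName() + " (" + voice.getSsmlGender() + ")");
    }
  }
}
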
diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SsmlVoiceGender.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SsmlVoiceGender.java
new file mode 100644
index 000000000000..12481e55c8b0
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SsmlVoiceGender.java
@@ -0,0 +1,166 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/texttospeech/v1/cloud_tts.proto
+
+package com.google.cloud.texttospeech.v1;
+
+/**
+ * <pre>
+ * Gender of the voice as described in
+ * [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice).
+ * </pre>
+ *
+ * Protobuf enum {@code google.cloud.texttospeech.v1.SsmlVoiceGender}
+ */
+public enum SsmlVoiceGender
+    implements com.google.protobuf.ProtocolMessageEnum {
+  /**
+   * <pre>
+   * An unspecified gender.
+   * In VoiceSelectionParams, this means that the client doesn't care which
+   * gender the selected voice will have. In the Voice field of
+   * ListVoicesResponse, this may mean that the voice doesn't fit any of the
+   * other categories in this enum, or that the gender of the voice isn't known.
+   * </pre>
+   *
+   * <code>SSML_VOICE_GENDER_UNSPECIFIED = 0;</code>
+ */
+ SSML_VOICE_GENDER_UNSPECIFIED(0),
+ /**
+   * <pre>
+   * A male voice.
+   * </pre>
+   *
+   * <code>MALE = 1;</code>
+ */
+ MALE(1),
+ /**
+   * <pre>
+   * A female voice.
+   * </pre>
+   *
+   * <code>FEMALE = 2;</code>
+ */
+ FEMALE(2),
+ /**
+   * <pre>
+   * A gender-neutral voice.
+   * </pre>
+   *
+   * <code>NEUTRAL = 3;</code>
+ */
+ NEUTRAL(3),
+ UNRECOGNIZED(-1),
+ ;
+
+ /**
+   * <pre>
+   * An unspecified gender.
+   * In VoiceSelectionParams, this means that the client doesn't care which
+   * gender the selected voice will have. In the Voice field of
+   * ListVoicesResponse, this may mean that the voice doesn't fit any of the
+   * other categories in this enum, or that the gender of the voice isn't known.
+   * </pre>
+   *
+   * <code>SSML_VOICE_GENDER_UNSPECIFIED = 0;</code>
+ */
+ public static final int SSML_VOICE_GENDER_UNSPECIFIED_VALUE = 0;
+ /**
+   * <pre>
+   * A male voice.
+   * </pre>
+   *
+   * <code>MALE = 1;</code>
+ */
+ public static final int MALE_VALUE = 1;
+ /**
+   * <pre>
+   * A female voice.
+   * </pre>
+   *
+   * <code>FEMALE = 2;</code>
+ */
+ public static final int FEMALE_VALUE = 2;
+ /**
+   * <pre>
+   * A gender-neutral voice.
+   * </pre>
+   *
+   * <code>NEUTRAL = 3;</code>
+ */
+ public static final int NEUTRAL_VALUE = 3;
+
+
+ public final int getNumber() {
+ if (this == UNRECOGNIZED) {
+ throw new java.lang.IllegalArgumentException(
+ "Can't get the number of an unknown enum value.");
+ }
+ return value;
+ }
+
+ /**
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+ @java.lang.Deprecated
+ public static SsmlVoiceGender valueOf(int value) {
+ return forNumber(value);
+ }
+
+ public static SsmlVoiceGender forNumber(int value) {
+ switch (value) {
+ case 0: return SSML_VOICE_GENDER_UNSPECIFIED;
+ case 1: return MALE;
+ case 2: return FEMALE;
+ case 3: return NEUTRAL;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap+ * Contains text input to be synthesized. Either `text` or `ssml` must be + * supplied. Supplying both or neither returns + * [google.rpc.Code.INVALID_ARGUMENT][]. The input size is limited to 5000 + * characters. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.SynthesisInput} + */ +public final class SynthesisInput extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.SynthesisInput) + SynthesisInputOrBuilder { +private static final long serialVersionUID = 0L; + // Use SynthesisInput.newBuilder() to construct. + private SynthesisInput(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + private SynthesisInput() { + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SynthesisInput( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + inputSourceCase_ = 1; + inputSource_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + inputSourceCase_ = 2; + inputSource_ = s; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesisInput_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesisInput_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.SynthesisInput.class, com.google.cloud.texttospeech.v1.SynthesisInput.Builder.class); + } + + private int inputSourceCase_ = 0; + private java.lang.Object inputSource_; + public enum InputSourceCase + implements com.google.protobuf.Internal.EnumLite { + TEXT(1), + SSML(2), + INPUTSOURCE_NOT_SET(0); + private final int value; + private InputSourceCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static InputSourceCase valueOf(int value) { + return forNumber(value); + } + + public static InputSourceCase forNumber(int value) { + switch (value) { + case 1: return TEXT; + case 2: return SSML; + case 0: return INPUTSOURCE_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public InputSourceCase + getInputSourceCase() { + return InputSourceCase.forNumber( + inputSourceCase_); + } + + public static final int TEXT_FIELD_NUMBER = 1; + /** + *
+   * <pre>
+   * The raw text to be synthesized.
+   * </pre>
+   *
+   * <code>string text = 1;</code>
+ */
+ public java.lang.String getText() {
+ java.lang.Object ref = "";
+ if (inputSourceCase_ == 1) {
+ ref = inputSource_;
+ }
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (inputSourceCase_ == 1) {
+ inputSource_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+   * <pre>
+   * The raw text to be synthesized.
+   * </pre>
+   *
+   * <code>string text = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getTextBytes() {
+ java.lang.Object ref = "";
+ if (inputSourceCase_ == 1) {
+ ref = inputSource_;
+ }
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ if (inputSourceCase_ == 1) {
+ inputSource_ = b;
+ }
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int SSML_FIELD_NUMBER = 2;
+ /**
+   * <pre>
+   * The SSML document to be synthesized. The SSML document must be valid
+   * and well-formed. Otherwise the RPC will fail and return
+   * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+   * [SSML](/speech/text-to-speech/docs/ssml).
+   * </pre>
+   *
+   * <code>string ssml = 2;</code>
+ */
+ public java.lang.String getSsml() {
+ java.lang.Object ref = "";
+ if (inputSourceCase_ == 2) {
+ ref = inputSource_;
+ }
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (inputSourceCase_ == 2) {
+ inputSource_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+   * <pre>
+   * The SSML document to be synthesized. The SSML document must be valid
+   * and well-formed. Otherwise the RPC will fail and return
+   * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+   * [SSML](/speech/text-to-speech/docs/ssml).
+   * </pre>
+   *
+   * <code>string ssml = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getSsmlBytes() {
+ java.lang.Object ref = "";
+ if (inputSourceCase_ == 2) {
+ ref = inputSource_;
+ }
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ if (inputSourceCase_ == 2) {
+ inputSource_ = b;
+ }
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (inputSourceCase_ == 1) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, inputSource_);
+ }
+ if (inputSourceCase_ == 2) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 2, inputSource_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (inputSourceCase_ == 1) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, inputSource_);
+ }
+ if (inputSourceCase_ == 2) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, inputSource_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.texttospeech.v1.SynthesisInput)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.texttospeech.v1.SynthesisInput other = (com.google.cloud.texttospeech.v1.SynthesisInput) obj;
+
+ boolean result = true;
+ result = result && getInputSourceCase().equals(
+ other.getInputSourceCase());
+ if (!result) return false;
+ switch (inputSourceCase_) {
+ case 1:
+ result = result && getText()
+ .equals(other.getText());
+ break;
+ case 2:
+ result = result && getSsml()
+ .equals(other.getSsml());
+ break;
+ case 0:
+ default:
+ }
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ switch (inputSourceCase_) {
+ case 1:
+ hash = (37 * hash) + TEXT_FIELD_NUMBER;
+ hash = (53 * hash) + getText().hashCode();
+ break;
+ case 2:
+ hash = (37 * hash) + SSML_FIELD_NUMBER;
+ hash = (53 * hash) + getSsml().hashCode();
+ break;
+ case 0:
+ default:
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom(
+ java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom(
+ java.nio.ByteBuffer data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesisInput parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesisInput parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesisInput parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(com.google.cloud.texttospeech.v1.SynthesisInput prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * + * Contains text input to be synthesized. Either `text` or `ssml` must be + * supplied. Supplying both or neither returns + * [google.rpc.Code.INVALID_ARGUMENT][]. The input size is limited to 5000 + * characters. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.SynthesisInput} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder
+ * The raw text to be synthesized. + *+ * + *
string text = 1;
+ */
+ public java.lang.String getText() {
+ java.lang.Object ref = "";
+ if (inputSourceCase_ == 1) {
+ ref = inputSource_;
+ }
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (inputSourceCase_ == 1) {
+ inputSource_ = s;
+ }
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * + * The raw text to be synthesized. + *+ * + *
string text = 1;
+ */
+ public com.google.protobuf.ByteString
+ getTextBytes() {
+ java.lang.Object ref = "";
+ if (inputSourceCase_ == 1) {
+ ref = inputSource_;
+ }
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ if (inputSourceCase_ == 1) {
+ inputSource_ = b;
+ }
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * + * The raw text to be synthesized. + *+ * + *
string text = 1;
+ */
+ public Builder setText(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ inputSourceCase_ = 1;
+ inputSource_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * + * The raw text to be synthesized. + *+ * + *
string text = 1;
+ */
+ public Builder clearText() {
+ if (inputSourceCase_ == 1) {
+ inputSourceCase_ = 0;
+ inputSource_ = null;
+ onChanged();
+ }
+ return this;
+ }
+ /**
+ * + * The raw text to be synthesized. + *+ * + *
string text = 1;
+ */
+ public Builder setTextBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+ inputSourceCase_ = 1;
+ inputSource_ = value;
+ onChanged();
+ return this;
+ }
+
+ /**
+ * + * The SSML document to be synthesized. The SSML document must be valid + * and well-formed. Otherwise the RPC will fail and return + * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see + * [SSML](/speech/text-to-speech/docs/ssml). + *+ * + *
string ssml = 2;
+ */
+ public java.lang.String getSsml() {
+ java.lang.Object ref = "";
+ if (inputSourceCase_ == 2) {
+ ref = inputSource_;
+ }
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (inputSourceCase_ == 2) {
+ inputSource_ = s;
+ }
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * + * The SSML document to be synthesized. The SSML document must be valid + * and well-formed. Otherwise the RPC will fail and return + * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see + * [SSML](/speech/text-to-speech/docs/ssml). + *+ * + *
string ssml = 2;
+ */
+ public com.google.protobuf.ByteString
+ getSsmlBytes() {
+ java.lang.Object ref = "";
+ if (inputSourceCase_ == 2) {
+ ref = inputSource_;
+ }
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ if (inputSourceCase_ == 2) {
+ inputSource_ = b;
+ }
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * + * The SSML document to be synthesized. The SSML document must be valid + * and well-formed. Otherwise the RPC will fail and return + * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see + * [SSML](/speech/text-to-speech/docs/ssml). + *+ * + *
string ssml = 2;
+ */
+ public Builder setSsml(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ inputSourceCase_ = 2;
+ inputSource_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * + * The SSML document to be synthesized. The SSML document must be valid + * and well-formed. Otherwise the RPC will fail and return + * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see + * [SSML](/speech/text-to-speech/docs/ssml). + *+ * + *
string ssml = 2;
+ */
+ public Builder clearSsml() {
+ if (inputSourceCase_ == 2) {
+ inputSourceCase_ = 0;
+ inputSource_ = null;
+ onChanged();
+ }
+ return this;
+ }
+ /**
+ * + * The SSML document to be synthesized. The SSML document must be valid + * and well-formed. Otherwise the RPC will fail and return + * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see + * [SSML](/speech/text-to-speech/docs/ssml). + *+ * + *
string ssml = 2;
+ */
+ public Builder setSsmlBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+ inputSourceCase_ = 2;
+ inputSource_ = value;
+ onChanged();
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.SynthesisInput)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.SynthesisInput)
+ private static final com.google.cloud.texttospeech.v1.SynthesisInput DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.SynthesisInput();
+ }
+
+ public static com.google.cloud.texttospeech.v1.SynthesisInput getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * The raw text to be synthesized. + *+ * + *
string text = 1;
+ */
+ java.lang.String getText();
+ /**
+   * <pre>
+   * The raw text to be synthesized.
+   * </pre>
+   *
+   * <code>string text = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getTextBytes();
+
+ /**
+   * <pre>
+   * The SSML document to be synthesized. The SSML document must be valid
+   * and well-formed. Otherwise the RPC will fail and return
+   * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+   * [SSML](/speech/text-to-speech/docs/ssml).
+   * </pre>
+   *
+   * <code>string ssml = 2;</code>
+ */
+ java.lang.String getSsml();
+ /**
+   * <pre>
+   * The SSML document to be synthesized. The SSML document must be valid
+   * and well-formed. Otherwise the RPC will fail and return
+   * [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+   * [SSML](/speech/text-to-speech/docs/ssml).
+   * </pre>
+   *
+   * <code>string ssml = 2;</code>
+ */
+ com.google.protobuf.ByteString
+ getSsmlBytes();
+
+ public com.google.cloud.texttospeech.v1.SynthesisInput.InputSourceCase getInputSourceCase();
+}
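
The class comment above states that exactly one of `text` or `ssml` may be supplied; in the generated Java this is the `input_source` oneof. A short sketch of how the oneof behaves through the generated builder; it is not part of the diff, and the sample strings are illustrative only.

import com.google.cloud.texttospeech.v1.SynthesisInput;

public class SynthesisInputSketch {
  public static void main(String[] args) {
    // Plain-text input selects the `text` arm of the input_source oneof.
    SynthesisInput textInput =
        SynthesisInput.newBuilder()
            .setText("Hello, world!")
            .build();
    System.out.println(textInput.getInputSourceCase());   // TEXT

    // Setting `ssml` on a builder switches the oneof to the `ssml` arm,
    // discarding any previously set text.
    SynthesisInput ssmlInput =
        textInput.toBuilder()
            .setSsml("<speak>Hello, <break time=\"300ms\"/> world.</speak>")
            .build();
    System.out.println(ssmlInput.getInputSourceCase());   // SSML
    System.out.println(ssmlInput.getText().isEmpty());    // true: the text arm was cleared
  }
}
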
diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechRequest.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechRequest.java
new file mode 100644
index 000000000000..bd0b1c6f228d
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechRequest.java
@@ -0,0 +1,1084 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/texttospeech/v1/cloud_tts.proto
+
+package com.google.cloud.texttospeech.v1;
+
+/**
+ * + * The top-level message sent by the client for the `SynthesizeSpeech` method. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.SynthesizeSpeechRequest} + */ +public final class SynthesizeSpeechRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.SynthesizeSpeechRequest) + SynthesizeSpeechRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use SynthesizeSpeechRequest.newBuilder() to construct. + private SynthesizeSpeechRequest(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + private SynthesizeSpeechRequest() { + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SynthesizeSpeechRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + com.google.cloud.texttospeech.v1.SynthesisInput.Builder subBuilder = null; + if (input_ != null) { + subBuilder = input_.toBuilder(); + } + input_ = input.readMessage(com.google.cloud.texttospeech.v1.SynthesisInput.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(input_); + input_ = subBuilder.buildPartial(); + } + + break; + } + case 18: { + com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder subBuilder = null; + if (voice_ != null) { + subBuilder = voice_.toBuilder(); + } + voice_ = input.readMessage(com.google.cloud.texttospeech.v1.VoiceSelectionParams.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(voice_); + voice_ = subBuilder.buildPartial(); + } + + break; + } + case 26: { + com.google.cloud.texttospeech.v1.AudioConfig.Builder subBuilder = null; + if (audioConfig_ != null) { + subBuilder = audioConfig_.toBuilder(); + } + audioConfig_ = input.readMessage(com.google.cloud.texttospeech.v1.AudioConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(audioConfig_); + audioConfig_ = subBuilder.buildPartial(); + } + + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest.class, com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest.Builder.class); + } + + public static final int INPUT_FIELD_NUMBER = 1; + private com.google.cloud.texttospeech.v1.SynthesisInput input_; + /** + *
+ * Required. The Synthesizer requires either plain text or SSML as input. + *+ * + *
.google.cloud.texttospeech.v1.SynthesisInput input = 1;
+ */
+ public boolean hasInput() {
+ return input_ != null;
+ }
+ /**
+   * <pre>
+   * Required. The Synthesizer requires either plain text or SSML as input.
+   * </pre>
+   *
+   * <code>.google.cloud.texttospeech.v1.SynthesisInput input = 1;</code>
+ */
+ public com.google.cloud.texttospeech.v1.SynthesisInput getInput() {
+ return input_ == null ? com.google.cloud.texttospeech.v1.SynthesisInput.getDefaultInstance() : input_;
+ }
+ /**
+   * <pre>
+   * Required. The Synthesizer requires either plain text or SSML as input.
+   * </pre>
+   *
+   * <code>.google.cloud.texttospeech.v1.SynthesisInput input = 1;</code>
+ */
+ public com.google.cloud.texttospeech.v1.SynthesisInputOrBuilder getInputOrBuilder() {
+ return getInput();
+ }
+
+ public static final int VOICE_FIELD_NUMBER = 2;
+ private com.google.cloud.texttospeech.v1.VoiceSelectionParams voice_;
+ /**
+   * <pre>
+   * Required. The desired voice of the synthesized audio.
+   * </pre>
+   *
+   * <code>.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;</code>
+ */
+ public boolean hasVoice() {
+ return voice_ != null;
+ }
+ /**
+   * <pre>
+   * Required. The desired voice of the synthesized audio.
+   * </pre>
+   *
+   * <code>.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;</code>
+ */
+ public com.google.cloud.texttospeech.v1.VoiceSelectionParams getVoice() {
+ return voice_ == null ? com.google.cloud.texttospeech.v1.VoiceSelectionParams.getDefaultInstance() : voice_;
+ }
+ /**
+   * <pre>
+   * Required. The desired voice of the synthesized audio.
+   * </pre>
+   *
+   * <code>.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;</code>
+ */
+ public com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder getVoiceOrBuilder() {
+ return getVoice();
+ }
+
+ public static final int AUDIO_CONFIG_FIELD_NUMBER = 3;
+ private com.google.cloud.texttospeech.v1.AudioConfig audioConfig_;
+ /**
+   * <pre>
+   * Required. The configuration of the synthesized audio.
+   * </pre>
+   *
+   * <code>.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;</code>
+ */
+ public boolean hasAudioConfig() {
+ return audioConfig_ != null;
+ }
+ /**
+   * <pre>
+   * Required. The configuration of the synthesized audio.
+   * </pre>
+   *
+   * <code>.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;</code>
+ */
+ public com.google.cloud.texttospeech.v1.AudioConfig getAudioConfig() {
+ return audioConfig_ == null ? com.google.cloud.texttospeech.v1.AudioConfig.getDefaultInstance() : audioConfig_;
+ }
+ /**
+   * <pre>
+   * Required. The configuration of the synthesized audio.
+   * </pre>
+   *
+   * <code>.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;</code>
+ */
+ public com.google.cloud.texttospeech.v1.AudioConfigOrBuilder getAudioConfigOrBuilder() {
+ return getAudioConfig();
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (input_ != null) {
+ output.writeMessage(1, getInput());
+ }
+ if (voice_ != null) {
+ output.writeMessage(2, getVoice());
+ }
+ if (audioConfig_ != null) {
+ output.writeMessage(3, getAudioConfig());
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (input_ != null) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, getInput());
+ }
+ if (voice_ != null) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, getVoice());
+ }
+ if (audioConfig_ != null) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, getAudioConfig());
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest other = (com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest) obj;
+
+ boolean result = true;
+ result = result && (hasInput() == other.hasInput());
+ if (hasInput()) {
+ result = result && getInput()
+ .equals(other.getInput());
+ }
+ result = result && (hasVoice() == other.hasVoice());
+ if (hasVoice()) {
+ result = result && getVoice()
+ .equals(other.getVoice());
+ }
+ result = result && (hasAudioConfig() == other.hasAudioConfig());
+ if (hasAudioConfig()) {
+ result = result && getAudioConfig()
+ .equals(other.getAudioConfig());
+ }
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (hasInput()) {
+ hash = (37 * hash) + INPUT_FIELD_NUMBER;
+ hash = (53 * hash) + getInput().hashCode();
+ }
+ if (hasVoice()) {
+ hash = (37 * hash) + VOICE_FIELD_NUMBER;
+ hash = (53 * hash) + getVoice().hashCode();
+ }
+ if (hasAudioConfig()) {
+ hash = (37 * hash) + AUDIO_CONFIG_FIELD_NUMBER;
+ hash = (53 * hash) + getAudioConfig().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom(
+ java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom(
+ java.nio.ByteBuffer data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * + * The top-level message sent by the client for the `SynthesizeSpeech` method. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.SynthesizeSpeechRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder
+ * Required. The Synthesizer requires either plain text or SSML as input. + *+ * + *
.google.cloud.texttospeech.v1.SynthesisInput input = 1;
+ */
+ public boolean hasInput() {
+ return inputBuilder_ != null || input_ != null;
+ }
+ /**
+ * + * Required. The Synthesizer requires either plain text or SSML as input. + *+ * + *
.google.cloud.texttospeech.v1.SynthesisInput input = 1;
+ */
+ public com.google.cloud.texttospeech.v1.SynthesisInput getInput() {
+ if (inputBuilder_ == null) {
+ return input_ == null ? com.google.cloud.texttospeech.v1.SynthesisInput.getDefaultInstance() : input_;
+ } else {
+ return inputBuilder_.getMessage();
+ }
+ }
+ /**
+ * + * Required. The Synthesizer requires either plain text or SSML as input. + *+ * + *
.google.cloud.texttospeech.v1.SynthesisInput input = 1;
+ */
+ public Builder setInput(com.google.cloud.texttospeech.v1.SynthesisInput value) {
+ if (inputBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ input_ = value;
+ onChanged();
+ } else {
+ inputBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ * + * Required. The Synthesizer requires either plain text or SSML as input. + *+ * + *
.google.cloud.texttospeech.v1.SynthesisInput input = 1;
+ */
+ public Builder setInput(
+ com.google.cloud.texttospeech.v1.SynthesisInput.Builder builderForValue) {
+ if (inputBuilder_ == null) {
+ input_ = builderForValue.build();
+ onChanged();
+ } else {
+ inputBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ * + * Required. The Synthesizer requires either plain text or SSML as input. + *+ * + *
.google.cloud.texttospeech.v1.SynthesisInput input = 1;
+ */
+ public Builder mergeInput(com.google.cloud.texttospeech.v1.SynthesisInput value) {
+ if (inputBuilder_ == null) {
+ if (input_ != null) {
+ input_ =
+ com.google.cloud.texttospeech.v1.SynthesisInput.newBuilder(input_).mergeFrom(value).buildPartial();
+ } else {
+ input_ = value;
+ }
+ onChanged();
+ } else {
+ inputBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ * + * Required. The Synthesizer requires either plain text or SSML as input. + *+ * + *
.google.cloud.texttospeech.v1.SynthesisInput input = 1;
+ */
+ public Builder clearInput() {
+ if (inputBuilder_ == null) {
+ input_ = null;
+ onChanged();
+ } else {
+ input_ = null;
+ inputBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ * + * Required. The Synthesizer requires either plain text or SSML as input. + *+ * + *
.google.cloud.texttospeech.v1.SynthesisInput input = 1;
+ */
+ public com.google.cloud.texttospeech.v1.SynthesisInput.Builder getInputBuilder() {
+
+ onChanged();
+ return getInputFieldBuilder().getBuilder();
+ }
+ /**
+ * + * Required. The Synthesizer requires either plain text or SSML as input. + *+ * + *
.google.cloud.texttospeech.v1.SynthesisInput input = 1;
+ */
+ public com.google.cloud.texttospeech.v1.SynthesisInputOrBuilder getInputOrBuilder() {
+ if (inputBuilder_ != null) {
+ return inputBuilder_.getMessageOrBuilder();
+ } else {
+ return input_ == null ?
+ com.google.cloud.texttospeech.v1.SynthesisInput.getDefaultInstance() : input_;
+ }
+ }
+ /**
+ * + * Required. The Synthesizer requires either plain text or SSML as input. + *+ * + *
.google.cloud.texttospeech.v1.SynthesisInput input = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.texttospeech.v1.SynthesisInput, com.google.cloud.texttospeech.v1.SynthesisInput.Builder, com.google.cloud.texttospeech.v1.SynthesisInputOrBuilder>
+ getInputFieldBuilder() {
+ if (inputBuilder_ == null) {
+ inputBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.texttospeech.v1.SynthesisInput, com.google.cloud.texttospeech.v1.SynthesisInput.Builder, com.google.cloud.texttospeech.v1.SynthesisInputOrBuilder>(
+ getInput(),
+ getParentForChildren(),
+ isClean());
+ input_ = null;
+ }
+ return inputBuilder_;
+ }
+
+ private com.google.cloud.texttospeech.v1.VoiceSelectionParams voice_ = null;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.texttospeech.v1.VoiceSelectionParams, com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder, com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder> voiceBuilder_;
+ /**
+ * + * Required. The desired voice of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;
+ */
+ public boolean hasVoice() {
+ return voiceBuilder_ != null || voice_ != null;
+ }
+ /**
+ * + * Required. The desired voice of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;
+ */
+ public com.google.cloud.texttospeech.v1.VoiceSelectionParams getVoice() {
+ if (voiceBuilder_ == null) {
+ return voice_ == null ? com.google.cloud.texttospeech.v1.VoiceSelectionParams.getDefaultInstance() : voice_;
+ } else {
+ return voiceBuilder_.getMessage();
+ }
+ }
+ /**
+ * + * Required. The desired voice of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;
+ */
+ public Builder setVoice(com.google.cloud.texttospeech.v1.VoiceSelectionParams value) {
+ if (voiceBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ voice_ = value;
+ onChanged();
+ } else {
+ voiceBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ * + * Required. The desired voice of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;
+ */
+ public Builder setVoice(
+ com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder builderForValue) {
+ if (voiceBuilder_ == null) {
+ voice_ = builderForValue.build();
+ onChanged();
+ } else {
+ voiceBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ * + * Required. The desired voice of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;
+ */
+ public Builder mergeVoice(com.google.cloud.texttospeech.v1.VoiceSelectionParams value) {
+ if (voiceBuilder_ == null) {
+ if (voice_ != null) {
+ voice_ =
+ com.google.cloud.texttospeech.v1.VoiceSelectionParams.newBuilder(voice_).mergeFrom(value).buildPartial();
+ } else {
+ voice_ = value;
+ }
+ onChanged();
+ } else {
+ voiceBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ * + * Required. The desired voice of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;
+ */
+ public Builder clearVoice() {
+ if (voiceBuilder_ == null) {
+ voice_ = null;
+ onChanged();
+ } else {
+ voice_ = null;
+ voiceBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ * + * Required. The desired voice of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;
+ */
+ public com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder getVoiceBuilder() {
+
+ onChanged();
+ return getVoiceFieldBuilder().getBuilder();
+ }
+ /**
+ * + * Required. The desired voice of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;
+ */
+ public com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder getVoiceOrBuilder() {
+ if (voiceBuilder_ != null) {
+ return voiceBuilder_.getMessageOrBuilder();
+ } else {
+ return voice_ == null ?
+ com.google.cloud.texttospeech.v1.VoiceSelectionParams.getDefaultInstance() : voice_;
+ }
+ }
+ /**
+ * + * Required. The desired voice of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.texttospeech.v1.VoiceSelectionParams, com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder, com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder>
+ getVoiceFieldBuilder() {
+ if (voiceBuilder_ == null) {
+ voiceBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.texttospeech.v1.VoiceSelectionParams, com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder, com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder>(
+ getVoice(),
+ getParentForChildren(),
+ isClean());
+ voice_ = null;
+ }
+ return voiceBuilder_;
+ }
+
+ private com.google.cloud.texttospeech.v1.AudioConfig audioConfig_ = null;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.texttospeech.v1.AudioConfig, com.google.cloud.texttospeech.v1.AudioConfig.Builder, com.google.cloud.texttospeech.v1.AudioConfigOrBuilder> audioConfigBuilder_;
+ /**
+ * + * Required. The configuration of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;
+ */
+ public boolean hasAudioConfig() {
+ return audioConfigBuilder_ != null || audioConfig_ != null;
+ }
+ /**
+ * + * Required. The configuration of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;
+ */
+ public com.google.cloud.texttospeech.v1.AudioConfig getAudioConfig() {
+ if (audioConfigBuilder_ == null) {
+ return audioConfig_ == null ? com.google.cloud.texttospeech.v1.AudioConfig.getDefaultInstance() : audioConfig_;
+ } else {
+ return audioConfigBuilder_.getMessage();
+ }
+ }
+ /**
+ * + * Required. The configuration of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;
+ */
+ public Builder setAudioConfig(com.google.cloud.texttospeech.v1.AudioConfig value) {
+ if (audioConfigBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ audioConfig_ = value;
+ onChanged();
+ } else {
+ audioConfigBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ * + * Required. The configuration of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;
+ */
+ public Builder setAudioConfig(
+ com.google.cloud.texttospeech.v1.AudioConfig.Builder builderForValue) {
+ if (audioConfigBuilder_ == null) {
+ audioConfig_ = builderForValue.build();
+ onChanged();
+ } else {
+ audioConfigBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ * + * Required. The configuration of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;
+ */
+ public Builder mergeAudioConfig(com.google.cloud.texttospeech.v1.AudioConfig value) {
+ if (audioConfigBuilder_ == null) {
+ if (audioConfig_ != null) {
+ audioConfig_ =
+ com.google.cloud.texttospeech.v1.AudioConfig.newBuilder(audioConfig_).mergeFrom(value).buildPartial();
+ } else {
+ audioConfig_ = value;
+ }
+ onChanged();
+ } else {
+ audioConfigBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ * + * Required. The configuration of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;
+ */
+ public Builder clearAudioConfig() {
+ if (audioConfigBuilder_ == null) {
+ audioConfig_ = null;
+ onChanged();
+ } else {
+ audioConfig_ = null;
+ audioConfigBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ * + * Required. The configuration of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;
+ */
+ public com.google.cloud.texttospeech.v1.AudioConfig.Builder getAudioConfigBuilder() {
+
+ onChanged();
+ return getAudioConfigFieldBuilder().getBuilder();
+ }
+ /**
+ * + * Required. The configuration of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;
+ */
+ public com.google.cloud.texttospeech.v1.AudioConfigOrBuilder getAudioConfigOrBuilder() {
+ if (audioConfigBuilder_ != null) {
+ return audioConfigBuilder_.getMessageOrBuilder();
+ } else {
+ return audioConfig_ == null ?
+ com.google.cloud.texttospeech.v1.AudioConfig.getDefaultInstance() : audioConfig_;
+ }
+ }
+ /**
+ * + * Required. The configuration of the synthesized audio. + *+ * + *
.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.texttospeech.v1.AudioConfig, com.google.cloud.texttospeech.v1.AudioConfig.Builder, com.google.cloud.texttospeech.v1.AudioConfigOrBuilder>
+ getAudioConfigFieldBuilder() {
+ if (audioConfigBuilder_ == null) {
+ audioConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.texttospeech.v1.AudioConfig, com.google.cloud.texttospeech.v1.AudioConfig.Builder, com.google.cloud.texttospeech.v1.AudioConfigOrBuilder>(
+ getAudioConfig(),
+ getParentForChildren(),
+ isClean());
+ audioConfig_ = null;
+ }
+ return audioConfigBuilder_;
+ }
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.SynthesizeSpeechRequest)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.SynthesizeSpeechRequest)
+ private static final com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest();
+ }
+
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * Required. The Synthesizer requires either plain text or SSML as input. + *+ * + *
.google.cloud.texttospeech.v1.SynthesisInput input = 1;
+ */
+ boolean hasInput();
+ /**
+   * <pre>
+   * Required. The Synthesizer requires either plain text or SSML as input.
+   * </pre>
+   *
+   * <code>.google.cloud.texttospeech.v1.SynthesisInput input = 1;</code>
+ */
+ com.google.cloud.texttospeech.v1.SynthesisInput getInput();
+ /**
+   * <pre>
+   * Required. The Synthesizer requires either plain text or SSML as input.
+   * </pre>
+   *
+   * <code>.google.cloud.texttospeech.v1.SynthesisInput input = 1;</code>
+ */
+ com.google.cloud.texttospeech.v1.SynthesisInputOrBuilder getInputOrBuilder();
+
+ /**
+   * <pre>
+   * Required. The desired voice of the synthesized audio.
+   * </pre>
+   *
+   * <code>.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;</code>
+ */
+ boolean hasVoice();
+ /**
+ * <pre>
+ * Required. The desired voice of the synthesized audio.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;</code>
+ */
+ com.google.cloud.texttospeech.v1.VoiceSelectionParams getVoice();
+ /**
+ * <pre>
+ * Required. The desired voice of the synthesized audio.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.VoiceSelectionParams voice = 2;</code>
+ */
+ com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder getVoiceOrBuilder();
+
+ /**
+ * <pre>
+ * Required. The configuration of the synthesized audio.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;</code>
+ */
+ boolean hasAudioConfig();
+ /**
+ * <pre>
+ * Required. The configuration of the synthesized audio.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;</code>
+ */
+ com.google.cloud.texttospeech.v1.AudioConfig getAudioConfig();
+ /**
+ * <pre>
+ * Required. The configuration of the synthesized audio.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.AudioConfig audio_config = 3;</code>
+ */
+ com.google.cloud.texttospeech.v1.AudioConfigOrBuilder getAudioConfigOrBuilder();
+}
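The three required fields declared above (input, voice and audio_config) are themselves messages, each with its own generated builder. As a minimal sketch (not part of the generated sources; the text, language code and encoding below are illustrative choices only), a request can be assembled like this:

import com.google.cloud.texttospeech.v1.AudioConfig;
import com.google.cloud.texttospeech.v1.AudioEncoding;
import com.google.cloud.texttospeech.v1.SynthesisInput;
import com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest;
import com.google.cloud.texttospeech.v1.VoiceSelectionParams;

public class BuildSynthesizeSpeechRequest {
  public static void main(String[] args) {
    // Each required field is populated through its own nested builder.
    SynthesizeSpeechRequest request =
        SynthesizeSpeechRequest.newBuilder()
            .setInput(SynthesisInput.newBuilder().setText("Hello, world").build())
            .setVoice(VoiceSelectionParams.newBuilder().setLanguageCode("en-US").build())
            .setAudioConfig(AudioConfig.newBuilder().setAudioEncoding(AudioEncoding.MP3).build())
            .build();
    // Messages print in proto text format, which is handy for debugging.
    System.out.println(request);
  }
}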
diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechResponse.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechResponse.java
new file mode 100644
index 000000000000..19ea28eb872b
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SynthesizeSpeechResponse.java
@@ -0,0 +1,486 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/texttospeech/v1/cloud_tts.proto
+
+package com.google.cloud.texttospeech.v1;
+
+/**
+ * + * The message returned to the client by the `SynthesizeSpeech` method. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.SynthesizeSpeechResponse} + */ +public final class SynthesizeSpeechResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.SynthesizeSpeechResponse) + SynthesizeSpeechResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use SynthesizeSpeechResponse.newBuilder() to construct. + private SynthesizeSpeechResponse(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + private SynthesizeSpeechResponse() { + audioContent_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SynthesizeSpeechResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + + audioContent_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse.class, com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse.Builder.class); + } + + public static final int AUDIO_CONTENT_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString audioContent_; + /** + *
+ * The audio data bytes encoded as specified in the request, including the
+ * header (For LINEAR16 audio, we include the WAV header). Note: as
+ * with all bytes fields, protobuffers use a pure binary representation,
+ * whereas JSON representations use base64.
+ * </pre>
+ *
+ * <code>bytes audio_content = 1;</code>
+ */
+ public com.google.protobuf.ByteString getAudioContent() {
+ return audioContent_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (!audioContent_.isEmpty()) {
+ output.writeBytes(1, audioContent_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (!audioContent_.isEmpty()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, audioContent_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse other = (com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse) obj;
+
+ boolean result = true;
+ result = result && getAudioContent()
+ .equals(other.getAudioContent());
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + AUDIO_CONTENT_FIELD_NUMBER;
+ hash = (53 * hash) + getAudioContent().hashCode();
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom(
+ java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom(
+ java.nio.ByteBuffer data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * + * The message returned to the client by the `SynthesizeSpeech` method. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.SynthesizeSpeechResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder
+ * The audio data bytes encoded as specified in the request, including the
+ * header (For LINEAR16 audio, we include the WAV header). Note: as
+ * with all bytes fields, protobuffers use a pure binary representation,
+ * whereas JSON representations use base64.
+ * </pre>
+ *
+ * <code>bytes audio_content = 1;</code>
+ */
+ public com.google.protobuf.ByteString getAudioContent() {
+ return audioContent_;
+ }
+ /**
+ * <pre>
+ * The audio data bytes encoded as specified in the request, including the
+ * header (For LINEAR16 audio, we include the WAV header). Note: as
+ * with all bytes fields, protobuffers use a pure binary representation,
+ * whereas JSON representations use base64.
+ * </pre>
+ *
+ * <code>bytes audio_content = 1;</code>
+ */
+ public Builder setAudioContent(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ audioContent_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The audio data bytes encoded as specified in the request, including the
+ * header (For LINEAR16 audio, we include the WAV header). Note: as
+ * with all bytes fields, protobuffers use a pure binary representation,
+ * whereas JSON representations use base64.
+ * </pre>
+ *
+ * <code>bytes audio_content = 1;</code>
+ */
+ public Builder clearAudioContent() {
+
+ audioContent_ = getDefaultInstance().getAudioContent();
+ onChanged();
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.SynthesizeSpeechResponse)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.SynthesizeSpeechResponse)
+ private static final com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse();
+ }
+
+ public static com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * The audio data bytes encoded as specified in the request, including the + * header (For LINEAR16 audio, we include the WAV header). Note: as + * with all bytes fields, protobuffers use a pure binary representation, + * whereas JSON representations use base64. + *+ * + *
+ * <code>bytes audio_content = 1;</code>
+ */
+ com.google.protobuf.ByteString getAudioContent();
+}
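Because the response carries nothing besides the encoded bytes, client code usually just streams audio_content to a sink. A minimal sketch (not part of the generated sources; the output-path handling is an illustrative assumption):

import com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse;
import com.google.protobuf.ByteString;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class WriteSynthesizedAudio {
  // Writes audio_content to disk exactly as returned: for MP3 the bytes are a
  // complete file, for LINEAR16 they already include the WAV header.
  static void writeAudio(SynthesizeSpeechResponse response, String path) throws IOException {
    ByteString audio = response.getAudioContent();
    try (OutputStream out = new FileOutputStream(path)) {
      audio.writeTo(out);
    }
  }
}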
diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechProto.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechProto.java
new file mode 100644
index 000000000000..b06155ef6cd8
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechProto.java
@@ -0,0 +1,180 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/texttospeech/v1/cloud_tts.proto
+
+package com.google.cloud.texttospeech.v1;
+
+public final class TextToSpeechProto {
+ private TextToSpeechProto() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistryLite registry) {
+ }
+
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ registerAllExtensions(
+ (com.google.protobuf.ExtensionRegistryLite) registry);
+ }
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_descriptor;
+ static final
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_descriptor;
+ static final
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_texttospeech_v1_Voice_descriptor;
+ static final
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_texttospeech_v1_Voice_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_descriptor;
+ static final
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_texttospeech_v1_SynthesisInput_descriptor;
+ static final
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_texttospeech_v1_SynthesisInput_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_descriptor;
+ static final
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_texttospeech_v1_AudioConfig_descriptor;
+ static final
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_texttospeech_v1_AudioConfig_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_descriptor;
+ static final
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n,google/cloud/texttospeech/v1/cloud_tts" +
+ ".proto\022\034google.cloud.texttospeech.v1\032\034go" +
+ "ogle/api/annotations.proto\"*\n\021ListVoices" +
+ "Request\022\025\n\rlanguage_code\030\001 \001(\t\"I\n\022ListVo" +
+ "icesResponse\0223\n\006voices\030\001 \003(\0132#.google.cl" +
+ "oud.texttospeech.v1.Voice\"\224\001\n\005Voice\022\026\n\016l" +
+ "anguage_codes\030\001 \003(\t\022\014\n\004name\030\002 \001(\t\022B\n\013ssm" +
+ "l_gender\030\003 \001(\0162-.google.cloud.texttospee" +
+ "ch.v1.SsmlVoiceGender\022!\n\031natural_sample_" +
+ "rate_hertz\030\004 \001(\005\"\332\001\n\027SynthesizeSpeechReq" +
+ "uest\022;\n\005input\030\001 \001(\0132,.google.cloud.textt" +
+ "ospeech.v1.SynthesisInput\022A\n\005voice\030\002 \001(\013" +
+ "22.google.cloud.texttospeech.v1.VoiceSel" +
+ "ectionParams\022?\n\014audio_config\030\003 \001(\0132).goo" +
+ "gle.cloud.texttospeech.v1.AudioConfig\"@\n" +
+ "\016SynthesisInput\022\016\n\004text\030\001 \001(\tH\000\022\016\n\004ssml\030" +
+ "\002 \001(\tH\000B\016\n\014input_source\"\177\n\024VoiceSelectio" +
+ "nParams\022\025\n\rlanguage_code\030\001 \001(\t\022\014\n\004name\030\002" +
+ " \001(\t\022B\n\013ssml_gender\030\003 \001(\0162-.google.cloud" +
+ ".texttospeech.v1.SsmlVoiceGender\"\253\001\n\013Aud" +
+ "ioConfig\022C\n\016audio_encoding\030\001 \001(\0162+.googl" +
+ "e.cloud.texttospeech.v1.AudioEncoding\022\025\n" +
+ "\rspeaking_rate\030\002 \001(\001\022\r\n\005pitch\030\003 \001(\001\022\026\n\016v" +
+ "olume_gain_db\030\004 \001(\001\022\031\n\021sample_rate_hertz" +
+ "\030\005 \001(\005\"1\n\030SynthesizeSpeechResponse\022\025\n\rau" +
+ "dio_content\030\001 \001(\014*W\n\017SsmlVoiceGender\022!\n\035" +
+ "SSML_VOICE_GENDER_UNSPECIFIED\020\000\022\010\n\004MALE\020" +
+ "\001\022\n\n\006FEMALE\020\002\022\013\n\007NEUTRAL\020\003*T\n\rAudioEncod" +
+ "ing\022\036\n\032AUDIO_ENCODING_UNSPECIFIED\020\000\022\014\n\010L" +
+ "INEAR16\020\001\022\007\n\003MP3\020\002\022\014\n\010OGG_OPUS\020\0032\270\002\n\014Tex" +
+ "tToSpeech\022\203\001\n\nListVoices\022/.google.cloud." +
+ "texttospeech.v1.ListVoicesRequest\0320.goog" +
+ "le.cloud.texttospeech.v1.ListVoicesRespo" +
+ "nse\"\022\202\323\344\223\002\014\022\n/v1/voices\022\241\001\n\020SynthesizeSp" +
+ "eech\0225.google.cloud.texttospeech.v1.Synt" +
+ "hesizeSpeechRequest\0326.google.cloud.textt" +
+ "ospeech.v1.SynthesizeSpeechResponse\"\036\202\323\344" +
+ "\223\002\030\"\023/v1/text:synthesize:\001*B\302\001\n com.goog" +
+ "le.cloud.texttospeech.v1B\021TextToSpeechPr" +
+ "otoP\001ZHgoogle.golang.org/genproto/google" +
+ "apis/cloud/texttospeech/v1;texttospeech\370" +
+ "\001\001\252\002\034Google.Cloud.TextToSpeech.V1\312\002\034Goog" +
+ "le\\Cloud\\TextToSpeech\\V1b\006proto3"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ com.google.api.AnnotationsProto.getDescriptor(),
+ }, assigner);
+ internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_descriptor,
+ new java.lang.String[] { "LanguageCode", });
+ internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_texttospeech_v1_ListVoicesResponse_descriptor,
+ new java.lang.String[] { "Voices", });
+ internal_static_google_cloud_texttospeech_v1_Voice_descriptor =
+ getDescriptor().getMessageTypes().get(2);
+ internal_static_google_cloud_texttospeech_v1_Voice_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_texttospeech_v1_Voice_descriptor,
+ new java.lang.String[] { "LanguageCodes", "Name", "SsmlGender", "NaturalSampleRateHertz", });
+ internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechRequest_descriptor,
+ new java.lang.String[] { "Input", "Voice", "AudioConfig", });
+ internal_static_google_cloud_texttospeech_v1_SynthesisInput_descriptor =
+ getDescriptor().getMessageTypes().get(4);
+ internal_static_google_cloud_texttospeech_v1_SynthesisInput_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_texttospeech_v1_SynthesisInput_descriptor,
+ new java.lang.String[] { "Text", "Ssml", "InputSource", });
+ internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_descriptor =
+ getDescriptor().getMessageTypes().get(5);
+ internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_descriptor,
+ new java.lang.String[] { "LanguageCode", "Name", "SsmlGender", });
+ internal_static_google_cloud_texttospeech_v1_AudioConfig_descriptor =
+ getDescriptor().getMessageTypes().get(6);
+ internal_static_google_cloud_texttospeech_v1_AudioConfig_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_texttospeech_v1_AudioConfig_descriptor,
+ new java.lang.String[] { "AudioEncoding", "SpeakingRate", "Pitch", "VolumeGainDb", "SampleRateHertz", });
+ internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_descriptor =
+ getDescriptor().getMessageTypes().get(7);
+ internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_texttospeech_v1_SynthesizeSpeechResponse_descriptor,
+ new java.lang.String[] { "AudioContent", });
+ com.google.protobuf.ExtensionRegistry registry =
+ com.google.protobuf.ExtensionRegistry.newInstance();
+ registry.add(com.google.api.AnnotationsProto.http);
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalUpdateFileDescriptor(descriptor, registry);
+ com.google.api.AnnotationsProto.getDescriptor();
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
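The descriptor above binds the TextToSpeech service to the HTTP mappings GET /v1/voices and POST /v1/text:synthesize. These message classes are consumed by the generated stubs in the companion gRPC artifact (TextToSpeechGrpc). The sketch below is one plausible way to call ListVoices directly over gRPC; it assumes application-default credentials plus the grpc-auth and google-auth-library dependencies, none of which are part of this diff.

import com.google.auth.oauth2.GoogleCredentials;
import com.google.cloud.texttospeech.v1.ListVoicesRequest;
import com.google.cloud.texttospeech.v1.ListVoicesResponse;
import com.google.cloud.texttospeech.v1.TextToSpeechGrpc;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.auth.MoreCallCredentials;

public class ListVoicesOverGrpc {
  public static void main(String[] args) throws Exception {
    // TLS is the default for a channel built this way.
    ManagedChannel channel =
        ManagedChannelBuilder.forTarget("texttospeech.googleapis.com:443").build();
    try {
      TextToSpeechGrpc.TextToSpeechBlockingStub stub =
          TextToSpeechGrpc.newBlockingStub(channel)
              .withCallCredentials(
                  MoreCallCredentials.from(GoogleCredentials.getApplicationDefault()));
      ListVoicesResponse voices =
          stub.listVoices(ListVoicesRequest.newBuilder().setLanguageCode("en-US").build());
      System.out.println("Voices available: " + voices.getVoicesCount());
    } finally {
      channel.shutdownNow();
    }
  }
}

In practice a hand-written google-cloud-texttospeech client is the usual entry point; the raw stub call here is only meant to show how the generated request and response types fit together.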
diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/Voice.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/Voice.java
new file mode 100644
index 000000000000..63b486b6e0d4
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/Voice.java
@@ -0,0 +1,990 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/texttospeech/v1/cloud_tts.proto
+
+package com.google.cloud.texttospeech.v1;
+
+/**
+ * + * Description of a voice supported by the TTS service. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.Voice} + */ +public final class Voice extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.Voice) + VoiceOrBuilder { +private static final long serialVersionUID = 0L; + // Use Voice.newBuilder() to construct. + private Voice(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + private Voice() { + languageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY; + name_ = ""; + ssmlGender_ = 0; + naturalSampleRateHertz_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Voice( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + languageCodes_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + languageCodes_.add(s); + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 24: { + int rawValue = input.readEnum(); + + ssmlGender_ = rawValue; + break; + } + case 32: { + + naturalSampleRateHertz_ = input.readInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + languageCodes_ = languageCodes_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_Voice_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_Voice_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.Voice.class, com.google.cloud.texttospeech.v1.Voice.Builder.class); + } + + private int bitField0_; + public static final int LANGUAGE_CODES_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList languageCodes_; + /** + *
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public com.google.protobuf.ProtocolStringList
+ getLanguageCodesList() {
+ return languageCodes_;
+ }
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public int getLanguageCodesCount() {
+ return languageCodes_.size();
+ }
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public java.lang.String getLanguageCodes(int index) {
+ return languageCodes_.get(index);
+ }
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getLanguageCodesBytes(int index) {
+ return languageCodes_.getByteString(index);
+ }
+
+ public static final int NAME_FIELD_NUMBER = 2;
+ private volatile java.lang.Object name_;
+ /**
+ * <pre>
+ * The name of this voice. Each distinct voice has a unique name.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ name_ = s;
+ return s;
+ }
+ }
+ /**
+ * <pre>
+ * The name of this voice. Each distinct voice has a unique name.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int SSML_GENDER_FIELD_NUMBER = 3;
+ private int ssmlGender_;
+ /**
+ * <pre>
+ * The gender of this voice.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public int getSsmlGenderValue() {
+ return ssmlGender_;
+ }
+ /**
+ * <pre>
+ * The gender of this voice.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public com.google.cloud.texttospeech.v1.SsmlVoiceGender getSsmlGender() {
+ com.google.cloud.texttospeech.v1.SsmlVoiceGender result = com.google.cloud.texttospeech.v1.SsmlVoiceGender.valueOf(ssmlGender_);
+ return result == null ? com.google.cloud.texttospeech.v1.SsmlVoiceGender.UNRECOGNIZED : result;
+ }
+
+ public static final int NATURAL_SAMPLE_RATE_HERTZ_FIELD_NUMBER = 4;
+ private int naturalSampleRateHertz_;
+ /**
+ * <pre>
+ * The natural sample rate (in hertz) for this voice.
+ * </pre>
+ *
+ * <code>int32 natural_sample_rate_hertz = 4;</code>
+ */
+ public int getNaturalSampleRateHertz() {
+ return naturalSampleRateHertz_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ for (int i = 0; i < languageCodes_.size(); i++) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageCodes_.getRaw(i));
+ }
+ if (!getNameBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_);
+ }
+ if (ssmlGender_ != com.google.cloud.texttospeech.v1.SsmlVoiceGender.SSML_VOICE_GENDER_UNSPECIFIED.getNumber()) {
+ output.writeEnum(3, ssmlGender_);
+ }
+ if (naturalSampleRateHertz_ != 0) {
+ output.writeInt32(4, naturalSampleRateHertz_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ {
+ int dataSize = 0;
+ for (int i = 0; i < languageCodes_.size(); i++) {
+ dataSize += computeStringSizeNoTag(languageCodes_.getRaw(i));
+ }
+ size += dataSize;
+ size += 1 * getLanguageCodesList().size();
+ }
+ if (!getNameBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_);
+ }
+ if (ssmlGender_ != com.google.cloud.texttospeech.v1.SsmlVoiceGender.SSML_VOICE_GENDER_UNSPECIFIED.getNumber()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(3, ssmlGender_);
+ }
+ if (naturalSampleRateHertz_ != 0) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, naturalSampleRateHertz_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.texttospeech.v1.Voice)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.texttospeech.v1.Voice other = (com.google.cloud.texttospeech.v1.Voice) obj;
+
+ boolean result = true;
+ result = result && getLanguageCodesList()
+ .equals(other.getLanguageCodesList());
+ result = result && getName()
+ .equals(other.getName());
+ result = result && ssmlGender_ == other.ssmlGender_;
+ result = result && (getNaturalSampleRateHertz()
+ == other.getNaturalSampleRateHertz());
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (getLanguageCodesCount() > 0) {
+ hash = (37 * hash) + LANGUAGE_CODES_FIELD_NUMBER;
+ hash = (53 * hash) + getLanguageCodesList().hashCode();
+ }
+ hash = (37 * hash) + NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getName().hashCode();
+ hash = (37 * hash) + SSML_GENDER_FIELD_NUMBER;
+ hash = (53 * hash) + ssmlGender_;
+ hash = (37 * hash) + NATURAL_SAMPLE_RATE_HERTZ_FIELD_NUMBER;
+ hash = (53 * hash) + getNaturalSampleRateHertz();
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.texttospeech.v1.Voice parseFrom(
+ java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.Voice parseFrom(
+ java.nio.ByteBuffer data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.Voice parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.Voice parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.Voice parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.Voice parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.Voice parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.Voice parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.Voice parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.Voice parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.Voice parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.Voice parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(com.google.cloud.texttospeech.v1.Voice prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * + * Description of a voice supported by the TTS service. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.Voice} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public com.google.protobuf.ProtocolStringList
+ getLanguageCodesList() {
+ return languageCodes_.getUnmodifiableView();
+ }
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public int getLanguageCodesCount() {
+ return languageCodes_.size();
+ }
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public java.lang.String getLanguageCodes(int index) {
+ return languageCodes_.get(index);
+ }
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getLanguageCodesBytes(int index) {
+ return languageCodes_.getByteString(index);
+ }
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public Builder setLanguageCodes(
+ int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureLanguageCodesIsMutable();
+ languageCodes_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public Builder addLanguageCodes(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureLanguageCodesIsMutable();
+ languageCodes_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public Builder addAllLanguageCodes(
+ java.lang.Iterable<java.lang.String> values) {
+ ensureLanguageCodesIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(
+ values, languageCodes_);
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public Builder clearLanguageCodes() {
+ languageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ public Builder addLanguageCodesBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+ ensureLanguageCodesIsMutable();
+ languageCodes_.add(value);
+ onChanged();
+ return this;
+ }
+
+ private java.lang.Object name_ = "";
+ /**
+ * <pre>
+ * The name of this voice. Each distinct voice has a unique name.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ name_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <pre>
+ * The name of this voice. Each distinct voice has a unique name.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <pre>
+ * The name of this voice. Each distinct voice has a unique name.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public Builder setName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The name of this voice. Each distinct voice has a unique name.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public Builder clearName() {
+
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The name of this voice. Each distinct voice has a unique name.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public Builder setNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ name_ = value;
+ onChanged();
+ return this;
+ }
+
+ private int ssmlGender_ = 0;
+ /**
+ * <pre>
+ * The gender of this voice.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public int getSsmlGenderValue() {
+ return ssmlGender_;
+ }
+ /**
+ * <pre>
+ * The gender of this voice.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public Builder setSsmlGenderValue(int value) {
+ ssmlGender_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The gender of this voice.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public com.google.cloud.texttospeech.v1.SsmlVoiceGender getSsmlGender() {
+ com.google.cloud.texttospeech.v1.SsmlVoiceGender result = com.google.cloud.texttospeech.v1.SsmlVoiceGender.valueOf(ssmlGender_);
+ return result == null ? com.google.cloud.texttospeech.v1.SsmlVoiceGender.UNRECOGNIZED : result;
+ }
+ /**
+ * <pre>
+ * The gender of this voice.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public Builder setSsmlGender(com.google.cloud.texttospeech.v1.SsmlVoiceGender value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ ssmlGender_ = value.getNumber();
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The gender of this voice.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public Builder clearSsmlGender() {
+
+ ssmlGender_ = 0;
+ onChanged();
+ return this;
+ }
+
+ private int naturalSampleRateHertz_ ;
+ /**
+ * <pre>
+ * The natural sample rate (in hertz) for this voice.
+ * </pre>
+ *
+ * <code>int32 natural_sample_rate_hertz = 4;</code>
+ */
+ public int getNaturalSampleRateHertz() {
+ return naturalSampleRateHertz_;
+ }
+ /**
+ * <pre>
+ * The natural sample rate (in hertz) for this voice.
+ * </pre>
+ *
+ * <code>int32 natural_sample_rate_hertz = 4;</code>
+ */
+ public Builder setNaturalSampleRateHertz(int value) {
+
+ naturalSampleRateHertz_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The natural sample rate (in hertz) for this voice.
+ * </pre>
+ *
+ * <code>int32 natural_sample_rate_hertz = 4;</code>
+ */
+ public Builder clearNaturalSampleRateHertz() {
+
+ naturalSampleRateHertz_ = 0;
+ onChanged();
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.Voice)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.Voice)
+ private static final com.google.cloud.texttospeech.v1.Voice DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.Voice();
+ }
+
+ public static com.google.cloud.texttospeech.v1.Voice getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * The languages that this voice supports, expressed as + * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g. + * "en-US", "es-419", "cmn-tw"). + *+ * + *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ java.util.List<java.lang.String> getLanguageCodesList();
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ int getLanguageCodesCount();
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ java.lang.String getLanguageCodes(int index);
+ /**
+ * <pre>
+ * The languages that this voice supports, expressed as
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ * "en-US", "es-419", "cmn-tw").
+ * </pre>
+ *
+ * <code>repeated string language_codes = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getLanguageCodesBytes(int index);
+
+ /**
+ * <pre>
+ * The name of this voice. Each distinct voice has a unique name.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ java.lang.String getName();
+ /**
+ * <pre>
+ * The name of this voice. Each distinct voice has a unique name.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ com.google.protobuf.ByteString
+ getNameBytes();
+
+ /**
+ * <pre>
+ * The gender of this voice.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ int getSsmlGenderValue();
+ /**
+ * <pre>
+ * The gender of this voice.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ com.google.cloud.texttospeech.v1.SsmlVoiceGender getSsmlGender();
+
+ /**
+ * <pre>
+ * The natural sample rate (in hertz) for this voice.
+ * </pre>
+ *
+ * <code>int32 natural_sample_rate_hertz = 4;</code>
+ */
+ int getNaturalSampleRateHertz();
+}
diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParams.java b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParams.java
new file mode 100644
index 000000000000..27475c7dfbfc
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParams.java
@@ -0,0 +1,923 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/texttospeech/v1/cloud_tts.proto
+
+package com.google.cloud.texttospeech.v1;
+
+/**
+ * + * Description of which voice to use for a synthesis request. + *+ * + * Protobuf type {@code google.cloud.texttospeech.v1.VoiceSelectionParams} + */ +public final class VoiceSelectionParams extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1.VoiceSelectionParams) + VoiceSelectionParamsOrBuilder { +private static final long serialVersionUID = 0L; + // Use VoiceSelectionParams.newBuilder() to construct. + private VoiceSelectionParams(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + private VoiceSelectionParams() { + languageCode_ = ""; + name_ = ""; + ssmlGender_ = 0; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private VoiceSelectionParams( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownFieldProto3( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + languageCode_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 24: { + int rawValue = input.readEnum(); + + ssmlGender_ = rawValue; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_descriptor; + } + + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1.TextToSpeechProto.internal_static_google_cloud_texttospeech_v1_VoiceSelectionParams_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1.VoiceSelectionParams.class, com.google.cloud.texttospeech.v1.VoiceSelectionParams.Builder.class); + } + + public static final int LANGUAGE_CODE_FIELD_NUMBER = 1; + private volatile java.lang.Object languageCode_; + /** + *
+ * The language (and optionally also the region) of the voice expressed as a
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+ * "en-US". Required. This should not include a script tag (e.g. use
+ * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+ * from the input provided in the SynthesisInput. The TTS service
+ * will use this parameter to help choose an appropriate voice. Note that
+ * the TTS service may choose a voice with a slightly different language code
+ * than the one selected; it may substitute a different region
+ * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+ * available), or even a different language, e.g. using "nb" (Norwegian
+ * Bokmal) instead of "no" (Norwegian)".
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public java.lang.String getLanguageCode() {
+ java.lang.Object ref = languageCode_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ languageCode_ = s;
+ return s;
+ }
+ }
+ /**
+ * <pre>
+ * The language (and optionally also the region) of the voice expressed as a
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+ * "en-US". Required. This should not include a script tag (e.g. use
+ * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+ * from the input provided in the SynthesisInput. The TTS service
+ * will use this parameter to help choose an appropriate voice. Note that
+ * the TTS service may choose a voice with a slightly different language code
+ * than the one selected; it may substitute a different region
+ * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+ * available), or even a different language, e.g. using "nb" (Norwegian
+ * Bokmal) instead of "no" (Norwegian)".
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getLanguageCodeBytes() {
+ java.lang.Object ref = languageCode_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ languageCode_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int NAME_FIELD_NUMBER = 2;
+ private volatile java.lang.Object name_;
+ /**
+ * <pre>
+ * The name of the voice. Optional; if not set, the service will choose a
+ * voice based on the other parameters such as language_code and gender.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ name_ = s;
+ return s;
+ }
+ }
+ /**
+ * <pre>
+ * The name of the voice. Optional; if not set, the service will choose a
+ * voice based on the other parameters such as language_code and gender.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int SSML_GENDER_FIELD_NUMBER = 3;
+ private int ssmlGender_;
+ /**
+ * <pre>
+ * The preferred gender of the voice. Optional; if not set, the service will
+ * choose a voice based on the other parameters such as language_code and
+ * name. Note that this is only a preference, not requirement; if a
+ * voice of the appropriate gender is not available, the synthesizer should
+ * substitute a voice with a different gender rather than failing the request.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public int getSsmlGenderValue() {
+ return ssmlGender_;
+ }
+ /**
+ * <pre>
+ * The preferred gender of the voice. Optional; if not set, the service will
+ * choose a voice based on the other parameters such as language_code and
+ * name. Note that this is only a preference, not requirement; if a
+ * voice of the appropriate gender is not available, the synthesizer should
+ * substitute a voice with a different gender rather than failing the request.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public com.google.cloud.texttospeech.v1.SsmlVoiceGender getSsmlGender() {
+ com.google.cloud.texttospeech.v1.SsmlVoiceGender result = com.google.cloud.texttospeech.v1.SsmlVoiceGender.valueOf(ssmlGender_);
+ return result == null ? com.google.cloud.texttospeech.v1.SsmlVoiceGender.UNRECOGNIZED : result;
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (!getLanguageCodeBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageCode_);
+ }
+ if (!getNameBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_);
+ }
+ if (ssmlGender_ != com.google.cloud.texttospeech.v1.SsmlVoiceGender.SSML_VOICE_GENDER_UNSPECIFIED.getNumber()) {
+ output.writeEnum(3, ssmlGender_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (!getLanguageCodeBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, languageCode_);
+ }
+ if (!getNameBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_);
+ }
+ if (ssmlGender_ != com.google.cloud.texttospeech.v1.SsmlVoiceGender.SSML_VOICE_GENDER_UNSPECIFIED.getNumber()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(3, ssmlGender_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.texttospeech.v1.VoiceSelectionParams)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.texttospeech.v1.VoiceSelectionParams other = (com.google.cloud.texttospeech.v1.VoiceSelectionParams) obj;
+
+ boolean result = true;
+ result = result && getLanguageCode()
+ .equals(other.getLanguageCode());
+ result = result && getName()
+ .equals(other.getName());
+ result = result && ssmlGender_ == other.ssmlGender_;
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER;
+ hash = (53 * hash) + getLanguageCode().hashCode();
+ hash = (37 * hash) + NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getName().hashCode();
+ hash = (37 * hash) + SSML_GENDER_FIELD_NUMBER;
+ hash = (53 * hash) + ssmlGender_;
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom(
+ java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom(
+ java.nio.ByteBuffer data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(com.google.cloud.texttospeech.v1.VoiceSelectionParams prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * <pre>
+ * Description of which voice to use for a synthesis request.
+ * </pre>
+ *
+ * Protobuf type {@code google.cloud.texttospeech.v1.VoiceSelectionParams}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:google.cloud.texttospeech.v1.VoiceSelectionParams)
+ com.google.cloud.texttospeech.v1.VoiceSelectionParamsOrBuilder {
+
+ private java.lang.Object languageCode_ = "";
+ /**
+ * <pre>
+ * The language (and optionally also the region) of the voice expressed as a
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+ * "en-US". Required. This should not include a script tag (e.g. use
+ * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+ * from the input provided in the SynthesisInput. The TTS service
+ * will use this parameter to help choose an appropriate voice. Note that
+ * the TTS service may choose a voice with a slightly different language code
+ * than the one selected; it may substitute a different region
+ * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+ * available), or even a different language, e.g. using "nb" (Norwegian
+ * Bokmal) instead of "no" (Norwegian)".
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public java.lang.String getLanguageCode() {
+ java.lang.Object ref = languageCode_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ languageCode_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <pre>
+ * The language (and optionally also the region) of the voice expressed as a
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+ * "en-US". Required. This should not include a script tag (e.g. use
+ * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+ * from the input provided in the SynthesisInput. The TTS service
+ * will use this parameter to help choose an appropriate voice. Note that
+ * the TTS service may choose a voice with a slightly different language code
+ * than the one selected; it may substitute a different region
+ * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+ * available), or even a different language, e.g. using "nb" (Norwegian
+ * Bokmal) instead of "no" (Norwegian)".
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getLanguageCodeBytes() {
+ java.lang.Object ref = languageCode_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ languageCode_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <pre>
+ * The language (and optionally also the region) of the voice expressed as a
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+ * "en-US". Required. This should not include a script tag (e.g. use
+ * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+ * from the input provided in the SynthesisInput. The TTS service
+ * will use this parameter to help choose an appropriate voice. Note that
+ * the TTS service may choose a voice with a slightly different language code
+ * than the one selected; it may substitute a different region
+ * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+ * available), or even a different language, e.g. using "nb" (Norwegian
+ * Bokmal) instead of "no" (Norwegian)".
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public Builder setLanguageCode(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ languageCode_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The language (and optionally also the region) of the voice expressed as a
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+ * "en-US". Required. This should not include a script tag (e.g. use
+ * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+ * from the input provided in the SynthesisInput. The TTS service
+ * will use this parameter to help choose an appropriate voice. Note that
+ * the TTS service may choose a voice with a slightly different language code
+ * than the one selected; it may substitute a different region
+ * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+ * available), or even a different language, e.g. using "nb" (Norwegian
+ * Bokmal) instead of "no" (Norwegian)".
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public Builder clearLanguageCode() {
+
+ languageCode_ = getDefaultInstance().getLanguageCode();
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The language (and optionally also the region) of the voice expressed as a
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+ * "en-US". Required. This should not include a script tag (e.g. use
+ * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+ * from the input provided in the SynthesisInput. The TTS service
+ * will use this parameter to help choose an appropriate voice. Note that
+ * the TTS service may choose a voice with a slightly different language code
+ * than the one selected; it may substitute a different region
+ * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+ * available), or even a different language, e.g. using "nb" (Norwegian
+ * Bokmal) instead of "no" (Norwegian)".
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ public Builder setLanguageCodeBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ languageCode_ = value;
+ onChanged();
+ return this;
+ }
+
+ private java.lang.Object name_ = "";
+ /**
+ * <pre>
+ * The name of the voice. Optional; if not set, the service will choose a
+ * voice based on the other parameters such as language_code and gender.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ name_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <pre>
+ * The name of the voice. Optional; if not set, the service will choose a
+ * voice based on the other parameters such as language_code and gender.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <pre>
+ * The name of the voice. Optional; if not set, the service will choose a
+ * voice based on the other parameters such as language_code and gender.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public Builder setName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The name of the voice. Optional; if not set, the service will choose a
+ * voice based on the other parameters such as language_code and gender.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public Builder clearName() {
+
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The name of the voice. Optional; if not set, the service will choose a
+ * voice based on the other parameters such as language_code and gender.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ public Builder setNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ name_ = value;
+ onChanged();
+ return this;
+ }
+
+ private int ssmlGender_ = 0;
+ /**
+ * <pre>
+ * The preferred gender of the voice. Optional; if not set, the service will
+ * choose a voice based on the other parameters such as language_code and
+ * name. Note that this is only a preference, not requirement; if a
+ * voice of the appropriate gender is not available, the synthesizer should
+ * substitute a voice with a different gender rather than failing the request.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public int getSsmlGenderValue() {
+ return ssmlGender_;
+ }
+ /**
+ * <pre>
+ * The preferred gender of the voice. Optional; if not set, the service will
+ * choose a voice based on the other parameters such as language_code and
+ * name. Note that this is only a preference, not requirement; if a
+ * voice of the appropriate gender is not available, the synthesizer should
+ * substitute a voice with a different gender rather than failing the request.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public Builder setSsmlGenderValue(int value) {
+ ssmlGender_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The preferred gender of the voice. Optional; if not set, the service will
+ * choose a voice based on the other parameters such as language_code and
+ * name. Note that this is only a preference, not requirement; if a
+ * voice of the appropriate gender is not available, the synthesizer should
+ * substitute a voice with a different gender rather than failing the request.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public com.google.cloud.texttospeech.v1.SsmlVoiceGender getSsmlGender() {
+ com.google.cloud.texttospeech.v1.SsmlVoiceGender result = com.google.cloud.texttospeech.v1.SsmlVoiceGender.valueOf(ssmlGender_);
+ return result == null ? com.google.cloud.texttospeech.v1.SsmlVoiceGender.UNRECOGNIZED : result;
+ }
+ /**
+ * <pre>
+ * The preferred gender of the voice. Optional; if not set, the service will
+ * choose a voice based on the other parameters such as language_code and
+ * name. Note that this is only a preference, not requirement; if a
+ * voice of the appropriate gender is not available, the synthesizer should
+ * substitute a voice with a different gender rather than failing the request.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public Builder setSsmlGender(com.google.cloud.texttospeech.v1.SsmlVoiceGender value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ ssmlGender_ = value.getNumber();
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * The preferred gender of the voice. Optional; if not set, the service will
+ * choose a voice based on the other parameters such as language_code and
+ * name. Note that this is only a preference, not requirement; if a
+ * voice of the appropriate gender is not available, the synthesizer should
+ * substitute a voice with a different gender rather than failing the request.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ public Builder clearSsmlGender() {
+
+ ssmlGender_ = 0;
+ onChanged();
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1.VoiceSelectionParams)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1.VoiceSelectionParams)
+ private static final com.google.cloud.texttospeech.v1.VoiceSelectionParams DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1.VoiceSelectionParams();
+ }
+
+ public static com.google.cloud.texttospeech.v1.VoiceSelectionParams getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * The language (and optionally also the region) of the voice expressed as a + * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. + * "en-US". Required. This should not include a script tag (e.g. use + * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred + * from the input provided in the SynthesisInput. The TTS service + * will use this parameter to help choose an appropriate voice. Note that + * the TTS service may choose a voice with a slightly different language code + * than the one selected; it may substitute a different region + * (e.g. using en-US rather than en-CA if there isn't a Canadian voice + * available), or even a different language, e.g. using "nb" (Norwegian + * Bokmal) instead of "no" (Norwegian)". + *+ * + *
string language_code = 1;
+ */
+ java.lang.String getLanguageCode();
+ /**
+ * <pre>
+ * The language (and optionally also the region) of the voice expressed as a
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+ * "en-US". Required. This should not include a script tag (e.g. use
+ * "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+ * from the input provided in the SynthesisInput. The TTS service
+ * will use this parameter to help choose an appropriate voice. Note that
+ * the TTS service may choose a voice with a slightly different language code
+ * than the one selected; it may substitute a different region
+ * (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+ * available), or even a different language, e.g. using "nb" (Norwegian
+ * Bokmal) instead of "no" (Norwegian)".
+ * </pre>
+ *
+ * <code>string language_code = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getLanguageCodeBytes();
+
+ /**
+ * <pre>
+ * The name of the voice. Optional; if not set, the service will choose a
+ * voice based on the other parameters such as language_code and gender.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ java.lang.String getName();
+ /**
+ * <pre>
+ * The name of the voice. Optional; if not set, the service will choose a
+ * voice based on the other parameters such as language_code and gender.
+ * </pre>
+ *
+ * <code>string name = 2;</code>
+ */
+ com.google.protobuf.ByteString
+ getNameBytes();
+
+ /**
+ * <pre>
+ * The preferred gender of the voice. Optional; if not set, the service will
+ * choose a voice based on the other parameters such as language_code and
+ * name. Note that this is only a preference, not requirement; if a
+ * voice of the appropriate gender is not available, the synthesizer should
+ * substitute a voice with a different gender rather than failing the request.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ int getSsmlGenderValue();
+ /**
+ * <pre>
+ * The preferred gender of the voice. Optional; if not set, the service will
+ * choose a voice based on the other parameters such as language_code and
+ * name. Note that this is only a preference, not requirement; if a
+ * voice of the appropriate gender is not available, the synthesizer should
+ * substitute a voice with a different gender rather than failing the request.
+ * </pre>
+ *
+ * <code>.google.cloud.texttospeech.v1.SsmlVoiceGender ssml_gender = 3;</code>
+ */
+ com.google.cloud.texttospeech.v1.SsmlVoiceGender getSsmlGender();
+}
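For orientation, a minimal usage sketch of the generated VoiceSelectionParams builder above; it assumes only the setters shown in this file plus the SsmlVoiceGender enum defined in cloud_tts.proto below, and the field values are purely illustrative:

import com.google.cloud.texttospeech.v1.SsmlVoiceGender;
import com.google.cloud.texttospeech.v1.VoiceSelectionParams;

public class VoiceSelectionParamsExample {
  public static void main(String[] args) {
    // Unset fields keep their proto3 defaults (empty string / SSML_VOICE_GENDER_UNSPECIFIED).
    VoiceSelectionParams voice =
        VoiceSelectionParams.newBuilder()
            .setLanguageCode("en-US")              // BCP-47 tag; required by the service
            .setSsmlGender(SsmlVoiceGender.FEMALE) // a preference, not a hard requirement
            // .setName("...") would pin a specific voice; omitted so the service chooses one
            .build();
    System.out.println(voice);
  }
}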
diff --git a/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/proto/google/cloud/texttospeech/v1/cloud_tts.proto b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/proto/google/cloud/texttospeech/v1/cloud_tts.proto
new file mode 100644
index 000000000000..0ccbde389214
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-texttospeech-v1/src/main/proto/google/cloud/texttospeech/v1/cloud_tts.proto
@@ -0,0 +1,225 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.texttospeech.v1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option csharp_namespace = "Google.Cloud.TextToSpeech.V1";
+option go_package = "google.golang.org/genproto/googleapis/cloud/texttospeech/v1;texttospeech";
+option java_multiple_files = true;
+option java_outer_classname = "TextToSpeechProto";
+option java_package = "com.google.cloud.texttospeech.v1";
+option php_namespace = "Google\\Cloud\\TextToSpeech\\V1";
+
+
+// Service that implements Google Cloud Text-to-Speech API.
+service TextToSpeech {
+ // Returns a list of [Voice][google.cloud.texttospeech.v1.Voice]
+ // supported for synthesis.
+ rpc ListVoices(ListVoicesRequest) returns (ListVoicesResponse) {
+ option (google.api.http) = {
+ get: "/v1/voices"
+ };
+ }
+
+ // Synthesizes speech synchronously: receive results after all text input
+ // has been processed.
+ rpc SynthesizeSpeech(SynthesizeSpeechRequest) returns (SynthesizeSpeechResponse) {
+ option (google.api.http) = {
+ post: "/v1/text:synthesize"
+ body: "*"
+ };
+ }
+}
+
+// The top-level message sent by the client for the `ListVoices` method.
+message ListVoicesRequest {
+ // Optional (but recommended)
+ // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
+ // specified, the ListVoices call will only return voices that can be used to
+ // synthesize this language_code. E.g. when specifying "en-NZ", you will get
+ // supported "en-*" voices; when specifying "no", you will get supported
+ // "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh"
+ // will also get supported "cmn-*" voices; specifying "zh-hk" will also get
+ // supported "yue-*" voices.
+ string language_code = 1;
+}
+
+// The message returned to the client by the `ListVoices` method.
+message ListVoicesResponse {
+ // The list of voices.
+ repeated Voice voices = 1;
+}
+
+// Description of a voice supported by the TTS service.
+message Voice {
+ // The languages that this voice supports, expressed as
+ // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g.
+ // "en-US", "es-419", "cmn-tw").
+ repeated string language_codes = 1;
+
+ // The name of this voice. Each distinct voice has a unique name.
+ string name = 2;
+
+ // The gender of this voice.
+ SsmlVoiceGender ssml_gender = 3;
+
+ // The natural sample rate (in hertz) for this voice.
+ int32 natural_sample_rate_hertz = 4;
+}
+
+// The top-level message sent by the client for the `SynthesizeSpeech` method.
+message SynthesizeSpeechRequest {
+ // Required. The Synthesizer requires either plain text or SSML as input.
+ SynthesisInput input = 1;
+
+ // Required. The desired voice of the synthesized audio.
+ VoiceSelectionParams voice = 2;
+
+ // Required. The configuration of the synthesized audio.
+ AudioConfig audio_config = 3;
+}
+
+// Contains text input to be synthesized. Either `text` or `ssml` must be
+// supplied. Supplying both or neither returns
+// [google.rpc.Code.INVALID_ARGUMENT][]. The input size is limited to 5000
+// characters.
+message SynthesisInput {
+ // The input source, which is either plain text or SSML.
+ oneof input_source {
+ // The raw text to be synthesized.
+ string text = 1;
+
+ // The SSML document to be synthesized. The SSML document must be valid
+ // and well-formed. Otherwise the RPC will fail and return
+ // [google.rpc.Code.INVALID_ARGUMENT][]. For more information, see
+ // [SSML](/speech/text-to-speech/docs/ssml).
+ string ssml = 2;
+ }
+}
+
+// Description of which voice to use for a synthesis request.
+message VoiceSelectionParams {
+ // The language (and optionally also the region) of the voice expressed as a
+ // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g.
+ // "en-US". Required. This should not include a script tag (e.g. use
+ // "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred
+ // from the input provided in the SynthesisInput. The TTS service
+ // will use this parameter to help choose an appropriate voice. Note that
+ // the TTS service may choose a voice with a slightly different language code
+ // than the one selected; it may substitute a different region
+ // (e.g. using en-US rather than en-CA if there isn't a Canadian voice
+ // available), or even a different language, e.g. using "nb" (Norwegian
+ // Bokmal) instead of "no" (Norwegian)".
+ string language_code = 1;
+
+ // The name of the voice. Optional; if not set, the service will choose a
+ // voice based on the other parameters such as language_code and gender.
+ string name = 2;
+
+ // The preferred gender of the voice. Optional; if not set, the service will
+ // choose a voice based on the other parameters such as language_code and
+ // name. Note that this is only a preference, not requirement; if a
+ // voice of the appropriate gender is not available, the synthesizer should
+ // substitute a voice with a different gender rather than failing the request.
+ SsmlVoiceGender ssml_gender = 3;
+}
+
+// Description of audio data to be synthesized.
+message AudioConfig {
+ // Required. The format of the requested audio byte stream.
+ AudioEncoding audio_encoding = 1;
+
+ // Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
+ // native speed supported by the specific voice. 2.0 is twice as fast, and
+ // 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 speed. Any
+ // other values < 0.25 or > 4.0 will return an error.
+ double speaking_rate = 2;
+
+ // Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
+ // semitones from the original pitch. -20 means decrease 20 semitones from the
+ // original pitch.
+ double pitch = 3;
+
+ // Optional volume gain (in dB) of the normal native volume supported by the
+ // specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
+ // 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
+ // will play at approximately half the amplitude of the normal native signal
+ // amplitude. A value of +6.0 (dB) will play at approximately twice the
+ // amplitude of the normal native signal amplitude. Strongly recommend not to
+ // exceed +10 (dB) as there's usually no effective increase in loudness for
+ // any value greater than that.
+ double volume_gain_db = 4;
+
+ // The synthesis sample rate (in hertz) for this audio. Optional. If this is
+ // different from the voice's natural sample rate, then the synthesizer will
+ // honor this request by converting to the desired sample rate (which might
+ // result in worse audio quality), unless the specified sample rate is not
+ // supported for the encoding chosen, in which case it will fail the request
+ // and return [google.rpc.Code.INVALID_ARGUMENT][].
+ int32 sample_rate_hertz = 5;
+}
+
+// The message returned to the client by the `SynthesizeSpeech` method.
+message SynthesizeSpeechResponse {
+ // The audio data bytes encoded as specified in the request, including the
+ // header (For LINEAR16 audio, we include the WAV header). Note: as
+ // with all bytes fields, protobuffers use a pure binary representation,
+ // whereas JSON representations use base64.
+ bytes audio_content = 1;
+}
+
+// Gender of the voice as described in
+// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice).
+enum SsmlVoiceGender {
+ // An unspecified gender.
+ // In VoiceSelectionParams, this means that the client doesn't care which
+ // gender the selected voice will have. In the Voice field of
+ // ListVoicesResponse, this may mean that the voice doesn't fit any of the
+ // other categories in this enum, or that the gender of the voice isn't known.
+ SSML_VOICE_GENDER_UNSPECIFIED = 0;
+
+ // A male voice.
+ MALE = 1;
+
+ // A female voice.
+ FEMALE = 2;
+
+ // A gender-neutral voice.
+ NEUTRAL = 3;
+}
+
+// Configuration to set up audio encoder. The encoding determines the output
+// audio format that we'd like.
+enum AudioEncoding {
+ // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
+ AUDIO_ENCODING_UNSPECIFIED = 0;
+
+ // Uncompressed 16-bit signed little-endian samples (Linear PCM).
+ // Audio content returned as LINEAR16 also contains a WAV header.
+ LINEAR16 = 1;
+
+ // MP3 audio.
+ MP3 = 2;
+
+ // Opus encoded audio wrapped in an ogg container. The result will be a
+ // file which can be played natively on Android, and in browsers (at least
+ // Chrome and Firefox). The quality of the encoding is considerably higher
+ // than MP3 while using approximately the same bitrate.
+ OGG_OPUS = 3;
+}
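To make the messages above concrete, the following sketch assembles a complete SynthesizeSpeechRequest from the types defined in this proto. The builder and setter names follow the standard protobuf Java mapping of the fields above; the input text and the MP3 encoding choice are illustrative:

import com.google.cloud.texttospeech.v1.AudioConfig;
import com.google.cloud.texttospeech.v1.AudioEncoding;
import com.google.cloud.texttospeech.v1.SynthesisInput;
import com.google.cloud.texttospeech.v1.SynthesizeSpeechRequest;
import com.google.cloud.texttospeech.v1.VoiceSelectionParams;

public class SynthesizeSpeechRequestExample {
  public static void main(String[] args) {
    // Exactly one member of the input_source oneof (text or ssml) may be set.
    SynthesisInput input = SynthesisInput.newBuilder().setText("Hello, world!").build();

    VoiceSelectionParams voice =
        VoiceSelectionParams.newBuilder().setLanguageCode("en-US").build();

    // audio_encoding is required; speaking_rate, pitch, etc. fall back to their defaults.
    AudioConfig audioConfig =
        AudioConfig.newBuilder().setAudioEncoding(AudioEncoding.MP3).build();

    SynthesizeSpeechRequest request =
        SynthesizeSpeechRequest.newBuilder()
            .setInput(input)
            .setVoice(voice)
            .setAudioConfig(audioConfig)
            .build();
    System.out.println(request);
  }
}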
diff --git a/google-cloud-bom/pom.xml b/google-cloud-bom/pom.xml
index 764858bc1f04..d5bb627b2b13 100644
--- a/google-cloud-bom/pom.xml
+++ b/google-cloud-bom/pom.xml
@@ -755,6 +755,16 @@
+ * <p>This class provides the ability to make remote calls to the backing service through method
+ * calls that map to API methods. Sample code to get started:
+ *
+ * <pre>
+ * <code>
+ * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+ *   String languageCode = "";
+ *   ListVoicesResponse response = textToSpeechClient.listVoices(languageCode);
+ * }
+ * </code>
+ * </pre>
+ *
+ * <p>Note: close() needs to be called on the textToSpeechClient object to clean up resources such
+ * as threads. In the example above, try-with-resources is used, which automatically calls close().
+ *
+ * <p>The surface of this class includes several types of Java methods for each of the API's
+ * methods:
+ *
+ * <p>See the individual methods for example code.
+ *
+ * <p>Many parameters require resource names to be formatted in a particular way. To assist with
+ * these names, this class includes a format method for each type of name, and additionally a parse
+ * method to extract the individual identifiers contained within names that are returned.
+ *
+ * <p>This class can be customized by passing in a custom instance of TextToSpeechSettings to
+ * create(). For example:
+ *
+ * <p>To customize credentials:
+ *
+ * <pre>
+ * <code>
+ * TextToSpeechSettings textToSpeechSettings =
+ *     TextToSpeechSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * TextToSpeechClient textToSpeechClient =
+ *     TextToSpeechClient.create(textToSpeechSettings);
+ * </code>
+ * </pre>
+ *
+ * To customize the endpoint:
+ *
+ * <pre>
+ * <code>
+ * TextToSpeechSettings textToSpeechSettings =
+ *     TextToSpeechSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * TextToSpeechClient textToSpeechClient =
+ *     TextToSpeechClient.create(textToSpeechSettings);
+ * </code>
+ * </pre>
+ */
+@Generated("by gapic-generator")
+@BetaApi
+public class TextToSpeechClient implements BackgroundResource {
+ private final TextToSpeechSettings settings;
+ private final TextToSpeechStub stub;
+
+ /** Constructs an instance of TextToSpeechClient with default settings. */
+ public static final TextToSpeechClient create() throws IOException {
+ return create(TextToSpeechSettings.newBuilder().build());
+ }
+
+ /**
+ * Constructs an instance of TextToSpeechClient, using the given settings. The channels are
+ * created based on the settings passed in, or defaults for any settings that are not set.
+ */
+ public static final TextToSpeechClient create(TextToSpeechSettings settings) throws IOException {
+ return new TextToSpeechClient(settings);
+ }
+
+ /**
+ * Constructs an instance of TextToSpeechClient, using the given stub for making calls. This is
+ * for advanced usage - prefer to use TextToSpeechSettings.
+ */
+ @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+ public static final TextToSpeechClient create(TextToSpeechStub stub) {
+ return new TextToSpeechClient(stub);
+ }
+
+ /**
+ * Constructs an instance of TextToSpeechClient, using the given settings. This is protected so
+ * that it is easy to make a subclass, but otherwise, the static factory methods should be
+ * preferred.
+ */
+ protected TextToSpeechClient(TextToSpeechSettings settings) throws IOException {
+ this.settings = settings;
+ this.stub = ((TextToSpeechStubSettings) settings.getStubSettings()).createStub();
+ }
+
+ @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+ protected TextToSpeechClient(TextToSpeechStub stub) {
+ this.settings = null;
+ this.stub = stub;
+ }
+
+ public final TextToSpeechSettings getSettings() {
+ return settings;
+ }
+
+ @BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+ public TextToSpeechStub getStub() {
+ return stub;
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD
+ /**
+ * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice] supported for synthesis.
+ *
+ * <p>Sample code:
+ *
+ * <pre><code>
+ * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+ * String languageCode = "";
+ * ListVoicesResponse response = textToSpeechClient.listVoices(languageCode);
+ * }
+ * </code></pre>
+ *
+ * @param languageCode Optional (but recommended)
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If specified, the
+ * ListVoices call will only return voices that can be used to synthesize this language_code.
+ * E.g. when specifying "en-NZ", you will get supported "en-*" voices; when specifying
+ * "no", you will get supported "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal)
+ * voices; specifying "zh" will also get supported "cmn-*" voices; specifying "zh-hk" will
+ * also get supported "yue-*" voices.
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final ListVoicesResponse listVoices(String languageCode) {
+
+ ListVoicesRequest request =
+ ListVoicesRequest.newBuilder().setLanguageCode(languageCode).build();
+ return listVoices(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD
+ /**
+ * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice] supported for synthesis.
+ *
+ * <p>Sample code:
+ *
+ * <pre><code>
+ * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+ * ListVoicesRequest request = ListVoicesRequest.newBuilder().build();
+ * ListVoicesResponse response = textToSpeechClient.listVoices(request);
+ * }
+ * </code></pre>
+ *
+ * @param request The request object containing all of the parameters for the API call.
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final ListVoicesResponse listVoices(ListVoicesRequest request) {
+ return listVoicesCallable().call(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD
+ /**
+ * Returns a list of [Voice][google.cloud.texttospeech.v1.Voice] supported for synthesis.
+ *
+ * <p>Sample code:
+ *
+ * <pre><code>
+ * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+ * ListVoicesRequest request = ListVoicesRequest.newBuilder().build();
+ * ApiFuture<ListVoicesResponse> future = textToSpeechClient.listVoicesCallable().futureCall(request);
+ * // Do something
+ * ListVoicesResponse response = future.get();
+ * }
+ * </code></pre>
+ */
+ public final UnaryCallable<ListVoicesRequest, ListVoicesResponse> listVoicesCallable() {
+ return stub.listVoicesCallable();
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD
+ /**
+ * Synthesizes speech synchronously: receive results after all text input has been processed.
+ *
+ * <p>Sample code:
+ *
+ * <pre><code>
+ * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+ * SynthesisInput input = SynthesisInput.newBuilder().build();
+ * VoiceSelectionParams voice = VoiceSelectionParams.newBuilder().build();
+ * AudioConfig audioConfig = AudioConfig.newBuilder().build();
+ * SynthesizeSpeechResponse response = textToSpeechClient.synthesizeSpeech(input, voice, audioConfig);
+ * }
+ * </code></pre>
+ *
+ * @param input Required. The Synthesizer requires either plain text or SSML as input.
+ * @param voice Required. The desired voice of the synthesized audio.
+ * @param audioConfig Required. The configuration of the synthesized audio.
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final SynthesizeSpeechResponse synthesizeSpeech(
+ SynthesisInput input, VoiceSelectionParams voice, AudioConfig audioConfig) {
+
+ SynthesizeSpeechRequest request =
+ SynthesizeSpeechRequest.newBuilder()
+ .setInput(input)
+ .setVoice(voice)
+ .setAudioConfig(audioConfig)
+ .build();
+ return synthesizeSpeech(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD
+ /**
+ * Synthesizes speech synchronously: receive results after all text input has been processed.
+ *
+ * <p>Sample code:
+ *
+ * <pre><code>
+ * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+ * SynthesisInput input = SynthesisInput.newBuilder().build();
+ * VoiceSelectionParams voice = VoiceSelectionParams.newBuilder().build();
+ * AudioConfig audioConfig = AudioConfig.newBuilder().build();
+ * SynthesizeSpeechRequest request = SynthesizeSpeechRequest.newBuilder()
+ * .setInput(input)
+ * .setVoice(voice)
+ * .setAudioConfig(audioConfig)
+ * .build();
+ * SynthesizeSpeechResponse response = textToSpeechClient.synthesizeSpeech(request);
+ * }
+ * </code></pre>
+ *
+ * @param request The request object containing all of the parameters for the API call.
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final SynthesizeSpeechResponse synthesizeSpeech(SynthesizeSpeechRequest request) {
+ return synthesizeSpeechCallable().call(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD
+ /**
+ * Synthesizes speech synchronously: receive results after all text input has been processed.
+ *
+ * <p>Sample code:
+ *
+ * <pre><code>
+ * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+ * SynthesisInput input = SynthesisInput.newBuilder().build();
+ * VoiceSelectionParams voice = VoiceSelectionParams.newBuilder().build();
+ * AudioConfig audioConfig = AudioConfig.newBuilder().build();
+ * SynthesizeSpeechRequest request = SynthesizeSpeechRequest.newBuilder()
+ * .setInput(input)
+ * .setVoice(voice)
+ * .setAudioConfig(audioConfig)
+ * .build();
+ * ApiFuture<SynthesizeSpeechResponse> future = textToSpeechClient.synthesizeSpeechCallable().futureCall(request);
+ * // Do something
+ * SynthesizeSpeechResponse response = future.get();
+ * }
+ * </code></pre>
+ */
+ public final UnaryCallableThe default instance has everything set to sensible defaults: + * + *
+ * <p>The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object. For
+ * example, to set the total timeout of listVoices to 30 seconds:
+ *
+ * <pre>
+ * <code>
+ * TextToSpeechSettings.Builder textToSpeechSettingsBuilder =
+ * TextToSpeechSettings.newBuilder();
+ * textToSpeechSettingsBuilder.listVoicesSettings().getRetrySettings().toBuilder()
+ * .setTotalTimeout(Duration.ofSeconds(30));
+ * TextToSpeechSettings textToSpeechSettings = textToSpeechSettingsBuilder.build();
+ * </code>
+ * </pre>
+ */
+@Generated("by gapic-generator")
+@BetaApi
+public class TextToSpeechSettings extends ClientSettingsNote: This method does not support applying settings to streaming methods.
+ */
+ public Builder applyToAllUnaryMethods(
+ ApiFunction The interfaces provided are listed below, along with usage samples.
+ *
+ * ================== TextToSpeechClient ==================
+ *
+ * Service Description: Service that implements Google Cloud Text-to-Speech API.
+ *
+ * Sample for TextToSpeechClient:
+ *
+ * This class is for advanced usage.
+ */
+@Generated("by gapic-generator")
+@BetaApi("The surface for use by generated code is not stable yet and may change in the future.")
+public class GrpcTextToSpeechCallableFactory implements GrpcStubCallableFactory {
+ @Override
+ public This class is for advanced usage and reflects the underlying API directly.
+ */
+@Generated("by gapic-generator")
+@BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+public class GrpcTextToSpeechStub extends TextToSpeechStub {
+
+ private static final MethodDescriptor This class is for advanced usage and reflects the underlying API directly.
+ */
+@Generated("by gapic-generator")
+@BetaApi("A restructuring of stub classes is planned, so this may break in the future")
+public abstract class TextToSpeechStub implements BackgroundResource {
+
+ public UnaryCallable The default instance has everything set to sensible defaults:
+ *
+ * The builder of this class is recursive, so contained classes are themselves builders. When
+ * build() is called, the tree of builders is called to create the complete settings object. For
+ * example, to set the total timeout of listVoices to 30 seconds:
+ *
+ * Note: This method does not support applying settings to streaming methods.
+ */
+ public Builder applyToAllUnaryMethods(
+ ApiFunction
+ *
+ */
+package com.google.cloud.texttospeech.v1;
diff --git a/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/GrpcTextToSpeechCallableFactory.java b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/GrpcTextToSpeechCallableFactory.java
new file mode 100644
index 000000000000..efcd67d7c97f
--- /dev/null
+++ b/google-cloud-clients/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/GrpcTextToSpeechCallableFactory.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.texttospeech.v1.stub;
+
+import com.google.api.core.BetaApi;
+import com.google.api.gax.grpc.GrpcCallSettings;
+import com.google.api.gax.grpc.GrpcCallableFactory;
+import com.google.api.gax.grpc.GrpcStubCallableFactory;
+import com.google.api.gax.rpc.BatchingCallSettings;
+import com.google.api.gax.rpc.BidiStreamingCallable;
+import com.google.api.gax.rpc.ClientContext;
+import com.google.api.gax.rpc.ClientStreamingCallable;
+import com.google.api.gax.rpc.OperationCallSettings;
+import com.google.api.gax.rpc.OperationCallable;
+import com.google.api.gax.rpc.PagedCallSettings;
+import com.google.api.gax.rpc.ServerStreamingCallSettings;
+import com.google.api.gax.rpc.ServerStreamingCallable;
+import com.google.api.gax.rpc.StreamingCallSettings;
+import com.google.api.gax.rpc.UnaryCallSettings;
+import com.google.api.gax.rpc.UnaryCallable;
+import com.google.longrunning.Operation;
+import com.google.longrunning.stub.OperationsStub;
+import javax.annotation.Generated;
+
+// AUTO-GENERATED DOCUMENTATION AND CLASS
+/**
+ * gRPC callable factory implementation for Cloud Text-to-Speech API.
+ *
+ *
+ * try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) {
+ * String languageCode = "";
+ * ListVoicesResponse response = textToSpeechClient.listVoices(languageCode);
+ * }
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+@Generated("by gapic-generator")
+@BetaApi
+public class TextToSpeechStubSettings extends StubSettings
+ * TextToSpeechStubSettings.Builder textToSpeechSettingsBuilder =
+ * TextToSpeechStubSettings.newBuilder();
+ * textToSpeechSettingsBuilder.listVoicesSettings().getRetrySettings().toBuilder()
+ * .setTotalTimeout(Duration.ofSeconds(30));
+ * TextToSpeechStubSettings textToSpeechSettings = textToSpeechSettingsBuilder.build();
+ *
+ *
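As a closing illustration, a hedged end-to-end sketch of the client surface added in this PR, using the flattened synthesizeSpeech(input, voice, audioConfig) overload shown earlier. getAudioContent() is the generated accessor for the audio_content bytes field; the output file name is an assumption for illustration:

import com.google.cloud.texttospeech.v1.AudioConfig;
import com.google.cloud.texttospeech.v1.AudioEncoding;
import com.google.cloud.texttospeech.v1.SynthesisInput;
import com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse;
import com.google.cloud.texttospeech.v1.TextToSpeechClient;
import com.google.cloud.texttospeech.v1.VoiceSelectionParams;
import java.nio.file.Files;
import java.nio.file.Paths;

public class SynthesizeToFileExample {
  public static void main(String[] args) throws Exception {
    // try-with-resources ensures close() releases the client's background resources.
    try (TextToSpeechClient client = TextToSpeechClient.create()) {
      SynthesisInput input =
          SynthesisInput.newBuilder().setText("Hello from Cloud Text-to-Speech").build();
      VoiceSelectionParams voice =
          VoiceSelectionParams.newBuilder().setLanguageCode("en-US").build();
      AudioConfig audioConfig =
          AudioConfig.newBuilder().setAudioEncoding(AudioEncoding.MP3).build();

      SynthesizeSpeechResponse response = client.synthesizeSpeech(input, voice, audioConfig);

      // The response carries the encoded audio bytes (MP3 in this configuration).
      Files.write(Paths.get("output.mp3"), response.getAudioContent().toByteArray());
    }
  }
}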