public interface SpeechTranscriptionConfigOrBuilder extends MessageOrBuilder
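This interface exposes read-only accessors; the generated SpeechTranscriptionConfig message and its Builder both implement it. A minimal sketch, assuming the standard protobuf builder pattern for the fields documented below (the setter names are inferred from the field names and are not part of this interface):

```java
import com.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig;
import com.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfigOrBuilder;

public class SpeechTranscriptionConfigExample {
  public static void main(String[] args) {
    // Build a config; the Builder also implements SpeechTranscriptionConfigOrBuilder,
    // so the getters documented below work on both the builder and the built message.
    SpeechTranscriptionConfig config =
        SpeechTranscriptionConfig.newBuilder()
            .setLanguageCode("en-US")            // required BCP-47 language tag
            .setEnableAutomaticPunctuation(true) // optional feature toggle
            .build();

    // Read the values back through the OrBuilder view.
    SpeechTranscriptionConfigOrBuilder view = config;
    System.out.println(view.getLanguageCode());               // "en-US"
    System.out.println(view.getEnableAutomaticPunctuation()); // true
  }
}
```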
Implements
MessageOrBuilder

Methods
getAudioTracks(int index)
public abstract int getAudioTracks(int index)
Optional. For file formats such as MXF or MKV that support multiple audio tracks, specify up to two tracks. Default: track 0.
repeated int32 audio_tracks = 6 [(.google.api.field_behavior) = OPTIONAL];
| Name | Description |
| --- | --- |
| index | int The index of the element to return. |

| Type | Description |
| --- | --- |
| int | The audioTracks at the given index. |
getAudioTracksCount()
public abstract int getAudioTracksCount()
Optional. For file formats such as MXF or MKV that support multiple audio tracks, specify up to two tracks. Default: track 0.
repeated int32 audio_tracks = 6 [(.google.api.field_behavior) = OPTIONAL];
| Type | Description |
| --- | --- |
| int | The count of audioTracks. |
getAudioTracksList()
public abstract List<Integer> getAudioTracksList()
Optional. For file formats such as MXF or MKV that support multiple audio tracks, specify up to two tracks. Default: track 0.
repeated int32 audio_tracks = 6 [(.google.api.field_behavior) = OPTIONAL];
| Type | Description |
| --- | --- |
| List<Integer> | A list containing the audioTracks. |
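A short sketch of the three accessor styles for the repeated audio_tracks field. This is a fragment assumed to run inside a method where config is any SpeechTranscriptionConfigOrBuilder; addAudioTracks is the assumed generated builder setter, not part of this interface:

```java
// Assumed setup: SpeechTranscriptionConfig.newBuilder().addAudioTracks(0).addAudioTracks(1)
int count = config.getAudioTracksCount();                     // number of configured tracks
for (int i = 0; i < count; i++) {
  System.out.println("track " + config.getAudioTracks(i));    // element access by index
}
java.util.List<Integer> tracks = config.getAudioTracksList(); // read-only view of all values
System.out.println(tracks);
```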
getDiarizationSpeakerCount()
public abstract int getDiarizationSpeakerCount()
Optional. If set, specifies the estimated number of speakers in the conversation. If not set, defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
int32 diarization_speaker_count = 8 [(.google.api.field_behavior) = OPTIONAL];
| Type | Description |
| --- | --- |
| int | The diarizationSpeakerCount. |
getEnableAutomaticPunctuation()
public abstract boolean getEnableAutomaticPunctuation()
Optional. If 'true', adds punctuation to recognition result hypotheses. This feature is only available in select languages. Setting this for requests in other languages has no effect at all. The default 'false' value does not add punctuation to result hypotheses. NOTE: "This is currently offered as an experimental service, complimentary to all users. In the future this may be exclusively available as a premium feature."
bool enable_automatic_punctuation = 5 [(.google.api.field_behavior) = OPTIONAL];
| Type | Description |
| --- | --- |
| boolean | The enableAutomaticPunctuation. |
getEnableSpeakerDiarization()
public abstract boolean getEnableSpeakerDiarization()
Optional. If 'true', enables speaker detection for each recognized word in the top alternative of the recognition result using a speaker_tag provided in the WordInfo. Note: When this is true, we send all the words from the beginning of the audio for the top alternative in every consecutive response. This is done in order to improve our speaker tags as our models learn to identify the speakers in the conversation over time.
bool enable_speaker_diarization = 7 [(.google.api.field_behavior) = OPTIONAL];
| Type | Description |
| --- | --- |
| boolean | The enableSpeakerDiarization. |
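Because diarization_speaker_count is ignored unless enable_speaker_diarization is true, the two fields are typically set together. A hedged sketch (fragment assumed to run inside a method; the builder setters are the assumed generated ones for these fields):

```java
SpeechTranscriptionConfig config =
    SpeechTranscriptionConfig.newBuilder()
        .setLanguageCode("en-US")
        .setEnableSpeakerDiarization(true) // enables speaker_tag in WordInfo
        .setDiarizationSpeakerCount(3)     // estimated speakers; ignored if diarization is off
        .build();

boolean diarize = config.getEnableSpeakerDiarization(); // true
int speakers = config.getDiarizationSpeakerCount();     // 3
System.out.println(diarize + " / " + speakers);
```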
getEnableWordConfidence()
public abstract boolean getEnableWordConfidence()
Optional. If true, the top result includes a list of words and the confidence for those words. If false, no word-level confidence information is returned. The default is false.
bool enable_word_confidence = 9 [(.google.api.field_behavior) = OPTIONAL];
| Type | Description |
| --- | --- |
| boolean | The enableWordConfidence. |
getFilterProfanity()
public abstract boolean getFilterProfanity()
Optional. If set to true, the server will attempt to filter out profanities, replacing all but the initial character in each filtered word with asterisks, e.g. "f***". If set to false or omitted, profanities won't be filtered out.
bool filter_profanity = 3 [(.google.api.field_behavior) = OPTIONAL];
| Type | Description |
| --- | --- |
| boolean | The filterProfanity. |
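Both flags default to false when unset. A brief snippet (fragment, with config assumed to be any SpeechTranscriptionConfigOrBuilder) reading them through this interface:

```java
boolean wordConfidence = config.getEnableWordConfidence(); // word-level confidence in top result
boolean profanityFilter = config.getFilterProfanity();     // mask profanities as e.g. "f***"
System.out.println("wordConfidence=" + wordConfidence + ", profanityFilter=" + profanityFilter);
```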
getLanguageCode()
public abstract String getLanguageCode()
Required. The language of the supplied audio as a BCP-47 language tag. Example: "en-US". See Language Support for a list of the currently supported language codes.
string language_code = 1 [(.google.api.field_behavior) = REQUIRED];
| Type | Description |
| --- | --- |
| String | The languageCode. |
getLanguageCodeBytes()
public abstract ByteString getLanguageCodeBytes()
Required. The language of the supplied audio as a BCP-47 language tag. Example: "en-US". See Language Support for a list of the currently supported language codes.
string language_code = 1 [(.google.api.field_behavior) = REQUIRED];
| Type | Description |
| --- | --- |
| ByteString | The bytes for languageCode. |
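getLanguageCode and getLanguageCodeBytes expose the same required field as a String and as its UTF-8 ByteString. A short snippet (fragment, with config assumed to be a populated SpeechTranscriptionConfigOrBuilder):

```java
String code = config.getLanguageCode();                             // e.g. "en-US"
com.google.protobuf.ByteString raw = config.getLanguageCodeBytes(); // UTF-8 bytes of the same value
System.out.println(code.equals(raw.toStringUtf8()));                // true
```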
getMaxAlternatives()
public abstract int getMaxAlternatives()
Optional. Maximum number of recognition hypotheses to be returned. Specifically, the maximum number of SpeechRecognitionAlternative messages within each SpeechTranscription. The server may return fewer than max_alternatives. Valid values are 0-30. A value of 0 or 1 will return a maximum of one. If omitted, will return a maximum of one.
int32 max_alternatives = 2 [(.google.api.field_behavior) = OPTIONAL];
| Type | Description |
| --- | --- |
| int | The maxAlternatives. |
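A brief sketch of setting max_alternatives (fragment inside a method; setMaxAlternatives is the assumed generated setter, not part of this interface). Valid values are 0-30, and 0 or 1 both yield at most one alternative:

```java
SpeechTranscriptionConfig config =
    SpeechTranscriptionConfig.newBuilder()
        .setLanguageCode("en-US")
        .setMaxAlternatives(5) // up to 5 SpeechRecognitionAlternative messages per transcription
        .build();
System.out.println(config.getMaxAlternatives()); // 5
```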
getSpeechContexts(int index)
public abstract SpeechContext getSpeechContexts(int index)
Optional. A means to provide context to assist the speech recognition.
repeated .google.cloud.videointelligence.v1p3beta1.SpeechContext speech_contexts = 4 [(.google.api.field_behavior) = OPTIONAL];
| Name | Description |
| --- | --- |
| index | int The index of the element to return. |

| Type | Description |
| --- | --- |
| SpeechContext | The speechContexts at the given index. |
getSpeechContextsCount()
public abstract int getSpeechContextsCount()
Optional. A means to provide context to assist the speech recognition.
repeated .google.cloud.videointelligence.v1p3beta1.SpeechContext speech_contexts = 4 [(.google.api.field_behavior) = OPTIONAL];
| Type | Description |
| --- | --- |
| int | The count of speechContexts. |
getSpeechContextsList()
public abstract List<SpeechContext> getSpeechContextsList()
Optional. A means to provide context to assist the speech recognition.
repeated .google.cloud.videointelligence.v1p3beta1.SpeechContext speech_contexts = 4 [(.google.api.field_behavior) = OPTIONAL];
| Type | Description |
| --- | --- |
| List<SpeechContext> | A list containing the speechContexts. |
getSpeechContextsOrBuilder(int index)
public abstract SpeechContextOrBuilder getSpeechContextsOrBuilder(int index)
Optional. A means to provide context to assist the speech recognition.
repeated .google.cloud.videointelligence.v1p3beta1.SpeechContext speech_contexts = 4 [(.google.api.field_behavior) = OPTIONAL];
| Name | Description |
| --- | --- |
| index | int The index of the element to return. |

| Type | Description |
| --- | --- |
| SpeechContextOrBuilder |  |
getSpeechContextsOrBuilderList()
public abstract List<? extends SpeechContextOrBuilder> getSpeechContextsOrBuilderList()
Optional. A means to provide context to assist the speech recognition.
repeated .google.cloud.videointelligence.v1p3beta1.SpeechContext speech_contexts = 4 [(.google.api.field_behavior) = OPTIONAL];
| Type | Description |
| --- | --- |
| List<? extends com.google.cloud.videointelligence.v1p3beta1.SpeechContextOrBuilder> |  |
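A sketch of populating the repeated speech_contexts field and reading it back through the accessors above. The builder setters and SpeechContext.addPhrases are assumed from the v1p3beta1 generated classes and are not part of this interface:

```java
import com.google.cloud.videointelligence.v1p3beta1.SpeechContext;
import com.google.cloud.videointelligence.v1p3beta1.SpeechContextOrBuilder;
import com.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig;

public class SpeechContextsExample {
  public static void main(String[] args) {
    SpeechTranscriptionConfig config =
        SpeechTranscriptionConfig.newBuilder()
            .setLanguageCode("en-US")
            .addSpeechContexts(
                SpeechContext.newBuilder()
                    .addPhrases("Cloud Video Intelligence") // hint phrase to bias recognition
                    .build())
            .build();

    System.out.println(config.getSpeechContextsCount());           // 1
    System.out.println(config.getSpeechContexts(0).getPhrases(0)); // "Cloud Video Intelligence"

    // The OrBuilder view of each element, as returned by getSpeechContextsOrBuilderList().
    for (SpeechContextOrBuilder ctx : config.getSpeechContextsOrBuilderList()) {
      System.out.println(ctx.getPhrasesList());
    }
  }
}
```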