public final class InputAudioConfig extends GeneratedMessageV3 implements InputAudioConfigOrBuilder
Instructs the speech recognizer how to process the audio content.
Protobuf type google.cloud.dialogflow.v2.InputAudioConfig
Inherited Members
com.google.protobuf.GeneratedMessageV3.<ListT>makeMutableCopy(ListT)
Static Fields
public static final int AUDIO_ENCODING_FIELD_NUMBER
Field Value — Type: int
public static final int DISABLE_NO_SPEECH_RECOGNIZED_EVENT_FIELD_NUMBER
Field Value — Type: int
public static final int ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER
Field Value — Type: int
public static final int ENABLE_WORD_INFO_FIELD_NUMBER
Field Value — Type: int
public static final int LANGUAGE_CODE_FIELD_NUMBER
Field Value — Type: int
public static final int MODEL_FIELD_NUMBER
Field Value — Type: int
public static final int MODEL_VARIANT_FIELD_NUMBER
Field Value — Type: int
public static final int PHRASE_HINTS_FIELD_NUMBER
Field Value — Type: int
public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER
Field Value — Type: int
public static final int SINGLE_UTTERANCE_FIELD_NUMBER
Field Value — Type: int
public static final int SPEECH_CONTEXTS_FIELD_NUMBER
Field Value — Type: int
Static Methods
public static InputAudioConfig getDefaultInstance()
public static final Descriptors.Descriptor getDescriptor()
public static InputAudioConfig.Builder newBuilder()
public static InputAudioConfig.Builder newBuilder(InputAudioConfig prototype)
public static InputAudioConfig parseDelimitedFrom(InputStream input)
public static InputAudioConfig parseDelimitedFrom(InputStream input, ExtensionRegistryLite extensionRegistry)
public static InputAudioConfig parseFrom(byte[] data)
Parameter: data (byte[])
public static InputAudioConfig parseFrom(byte[] data, ExtensionRegistryLite extensionRegistry)
public static InputAudioConfig parseFrom(ByteString data)
public static InputAudioConfig parseFrom(ByteString data, ExtensionRegistryLite extensionRegistry)
public static InputAudioConfig parseFrom(CodedInputStream input)
public static InputAudioConfig parseFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)
public static InputAudioConfig parseFrom(InputStream input)
public static InputAudioConfig parseFrom(InputStream input, ExtensionRegistryLite extensionRegistry)
public static InputAudioConfig parseFrom(ByteBuffer data)
public static InputAudioConfig parseFrom(ByteBuffer data, ExtensionRegistryLite extensionRegistry)
public static Parser<InputAudioConfig> parser()
Methods
public boolean equals(Object obj)
Parameter: obj (Object)
Overrides
public AudioEncoding getAudioEncoding()
Required. Audio encoding of the audio content to process.
.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
public int getAudioEncodingValue()
Required. Audio encoding of the audio content to process.
.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED];
Returns: int — The enum numeric value on the wire for audioEncoding.
public InputAudioConfig getDefaultInstanceForType()
public boolean getDisableNoSpeechRecognizedEvent()
Only used in
Participants.AnalyzeContent
and
Participants.StreamingAnalyzeContent.
If false
and recognition doesn't return any result, a
NO_SPEECH_RECOGNIZED
event is triggered and sent to the Dialogflow agent.
bool disable_no_speech_recognized_event = 14;
Returns: boolean — The disableNoSpeechRecognizedEvent.
public boolean getEnableAutomaticPunctuation()
Enable automatic punctuation option at the speech backend.
bool enable_automatic_punctuation = 17;
Returns: boolean — The enableAutomaticPunctuation.
public boolean getEnableWordInfo()
If true
, Dialogflow returns
SpeechWordInfo in
StreamingRecognitionResult
with information about the recognized speech words, e.g. start and end time
offsets. If false or unspecified, Speech doesn't return any word-level
information.
bool enable_word_info = 13;
Returns: boolean — The enableWordInfo.
public String getLanguageCode()
Required. The language of the supplied audio. Dialogflow does not do
translations. See Language
Support
for a list of the currently supported language codes. Note that queries in
the same session do not necessarily need to specify the same language.
string language_code = 3 [(.google.api.field_behavior) = REQUIRED];
Returns: String — The languageCode.
public ByteString getLanguageCodeBytes()
Required. The language of the supplied audio. Dialogflow does not do
translations. See Language
Support
for a list of the currently supported language codes. Note that queries in
the same session do not necessarily need to specify the same language.
string language_code = 3 [(.google.api.field_behavior) = REQUIRED];
Returns: ByteString — The bytes for languageCode.
public String getModel()
Which Speech model to select for the given request. Select the
model best suited to your domain to get best results. If a model is not
explicitly specified, then we auto-select a model based on the parameters
in the InputAudioConfig.
If enhanced speech model is enabled for the agent and an enhanced
version of the specified model for the language does not exist, then the
speech is recognized using the standard version of the specified model.
Refer to
Cloud Speech API
documentation
for more details.
If you specify a model, the following models typically have the best
performance:
- phone_call (best for Agent Assist and telephony)
- latest_short (best for Dialogflow non-telephony)
- command_and_search (best for very short utterances and commands)
string model = 7;
Returns: String — The model.
public ByteString getModelBytes()
Which Speech model to select for the given request. Select the
model best suited to your domain to get best results. If a model is not
explicitly specified, then we auto-select a model based on the parameters
in the InputAudioConfig.
If enhanced speech model is enabled for the agent and an enhanced
version of the specified model for the language does not exist, then the
speech is recognized using the standard version of the specified model.
Refer to
Cloud Speech API
documentation
for more details.
If you specify a model, the following models typically have the best
performance:
- phone_call (best for Agent Assist and telephony)
- latest_short (best for Dialogflow non-telephony)
- command_and_search (best for very short utterances and commands)
string model = 7;
Returns: ByteString — The bytes for model.
public SpeechModelVariant getModelVariant()
Which variant of the Speech
model to use.
.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;
public int getModelVariantValue()
Which variant of the Speech
model to use.
.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;
Returns: int — The enum numeric value on the wire for modelVariant.
public Parser<InputAudioConfig> getParserForType()
Overrides
public String getPhraseHints(int index)
Deprecated. google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
google/cloud/dialogflow/v2/audio_config.proto;l=129
A list of strings containing words and phrases that the speech
recognizer should recognize with higher likelihood.
See the Cloud Speech
documentation
for more details.
This field is deprecated. Please use speech_contexts
instead. If you
specify both phrase_hints
and speech_contexts
, Dialogflow will
treat the phrase_hints
as a single additional SpeechContext
.
repeated string phrase_hints = 4 [deprecated = true];
Parameter: index (int) — The index of the element to return.
Returns: String — The phraseHints at the given index.
public ByteString getPhraseHintsBytes(int index)
Deprecated. google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
google/cloud/dialogflow/v2/audio_config.proto;l=129
A list of strings containing words and phrases that the speech
recognizer should recognize with higher likelihood.
See the Cloud Speech
documentation
for more details.
This field is deprecated. Please use speech_contexts
instead. If you
specify both phrase_hints
and speech_contexts
, Dialogflow will
treat the phrase_hints
as a single additional SpeechContext
.
repeated string phrase_hints = 4 [deprecated = true];
Parameter: index (int) — The index of the value to return.
Returns: ByteString — The bytes of the phraseHints at the given index.
public int getPhraseHintsCount()
Deprecated. google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
google/cloud/dialogflow/v2/audio_config.proto;l=129
A list of strings containing words and phrases that the speech
recognizer should recognize with higher likelihood.
See the Cloud Speech
documentation
for more details.
This field is deprecated. Please use speech_contexts
instead. If you
specify both phrase_hints
and speech_contexts
, Dialogflow will
treat the phrase_hints
as a single additional SpeechContext
.
repeated string phrase_hints = 4 [deprecated = true];
Returns: int — The count of phraseHints.
public ProtocolStringList getPhraseHintsList()
Deprecated. google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
google/cloud/dialogflow/v2/audio_config.proto;l=129
A list of strings containing words and phrases that the speech
recognizer should recognize with higher likelihood.
See the Cloud Speech
documentation
for more details.
This field is deprecated. Please use speech_contexts
instead. If you
specify both phrase_hints
and speech_contexts
, Dialogflow will
treat the phrase_hints
as a single additional SpeechContext
.
repeated string phrase_hints = 4 [deprecated = true];
public int getSampleRateHertz()
Required. Sample rate (in Hertz) of the audio content sent in the query.
Refer to Cloud Speech API
documentation for
more details.
int32 sample_rate_hertz = 2 [(.google.api.field_behavior) = REQUIRED];
Returns: int — The sampleRateHertz.
public int getSerializedSize()
Returns: int
Overrides
public boolean getSingleUtterance()
If false
(default), recognition does not cease until the
client closes the stream.
If true
, the recognizer will detect a single spoken utterance in input
audio. Recognition ceases when it detects the audio's voice has
stopped or paused. In this case, once a detected intent is received, the
client should close the stream and start a new request with a new stream as
needed.
Note: This setting is relevant only for streaming methods.
Note: When specified, InputAudioConfig.single_utterance takes precedence
over StreamingDetectIntentRequest.single_utterance.
bool single_utterance = 8;
Returns: boolean — The singleUtterance.
public SpeechContext getSpeechContexts(int index)
Context information to assist speech recognition.
See the Cloud Speech
documentation
for more details.
repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
Parameter: index (int)
public int getSpeechContextsCount()
Context information to assist speech recognition.
See the Cloud Speech
documentation
for more details.
repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
Returns: int
public List<SpeechContext> getSpeechContextsList()
Context information to assist speech recognition.
See the Cloud Speech
documentation
for more details.
repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
public SpeechContextOrBuilder getSpeechContextsOrBuilder(int index)
Context information to assist speech recognition.
See the Cloud Speech
documentation
for more details.
repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
Parameter: index (int)
public List<? extends SpeechContextOrBuilder> getSpeechContextsOrBuilderList()
Context information to assist speech recognition.
See the Cloud Speech
documentation
for more details.
repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;
Returns: List<? extends com.google.cloud.dialogflow.v2.SpeechContextOrBuilder>
public int hashCode()
Returns: int
Overrides
protected GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
Overrides
public final boolean isInitialized()
Overrides
public InputAudioConfig.Builder newBuilderForType()
protected InputAudioConfig.Builder newBuilderForType(GeneratedMessageV3.BuilderParent parent)
Overrides
protected Object newInstance(GeneratedMessageV3.UnusedPrivateParameter unused)
Returns: Object
Overrides
public InputAudioConfig.Builder toBuilder()
public void writeTo(CodedOutputStream output)
Overrides