- 4.59.0 (latest)
- 4.58.0
- 4.57.0
- 4.55.0
- 4.54.0
- 4.53.0
- 4.52.0
- 4.51.0
- 4.50.0
- 4.49.0
- 4.48.0
- 4.47.0
- 4.46.0
- 4.45.0
- 4.43.0
- 4.42.0
- 4.41.0
- 4.40.0
- 4.39.0
- 4.38.0
- 4.37.0
- 4.36.0
- 4.35.0
- 4.34.0
- 4.33.0
- 4.30.0
- 4.29.0
- 4.28.0
- 4.27.0
- 4.26.0
- 4.25.0
- 4.24.0
- 4.23.0
- 4.22.0
- 4.21.0
- 4.20.0
- 4.19.0
- 4.18.0
- 4.17.0
- 4.15.0
- 4.14.0
- 4.13.0
- 4.12.0
- 4.11.0
- 4.10.0
- 4.9.1
- 4.8.6
- 4.7.5
- 4.6.0
- 4.5.11
- 4.4.0
- 4.3.1
public interface StreamingRecognitionResultOrBuilder extends MessageOrBuilder
Implements
MessageOrBuilderMethods
getConfidence()
public abstract float getConfidence()
The Speech confidence between 0.0 and 1.0 for the current portion of audio.
A higher number indicates an estimated greater likelihood that the
recognized words are correct. The default of 0.0 is a sentinel value
indicating that confidence was not set.
This field is typically only provided if is_final is true and you should not rely on it being accurate or even set.
float confidence = 4;
Type | Description |
float |
The confidence. |
getIsFinal()
public abstract boolean getIsFinal()
If false, the StreamingRecognitionResult represents an interim result that may change. If true, the recognizer will not return any further hypotheses about this piece of the audio. May only be populated for message_type = TRANSCRIPT.
bool is_final = 3;
Type | Description |
boolean |
The isFinal. |
getLanguageCode()
public abstract String getLanguageCode()
Detected language code for the transcript.
string language_code = 10;
Type | Description |
String |
The languageCode. |
getLanguageCodeBytes()
public abstract ByteString getLanguageCodeBytes()
Detected language code for the transcript.
string language_code = 10;
Type | Description |
ByteString |
The bytes for languageCode. |
getMessageType()
public abstract StreamingRecognitionResult.MessageType getMessageType()
Type of the result message.
.google.cloud.dialogflow.v2.StreamingRecognitionResult.MessageType message_type = 1;
Type | Description |
StreamingRecognitionResult.MessageType |
The messageType. |
getMessageTypeValue()
public abstract int getMessageTypeValue()
Type of the result message.
.google.cloud.dialogflow.v2.StreamingRecognitionResult.MessageType message_type = 1;
Type | Description |
int |
The enum numeric value on the wire for messageType. |
getSpeechEndOffset()
public abstract Duration getSpeechEndOffset()
Time offset of the end of this Speech recognition result relative to the beginning of the audio. Only populated for message_type = TRANSCRIPT.
.google.protobuf.Duration speech_end_offset = 8;
Type | Description |
Duration |
The speechEndOffset. |
getSpeechEndOffsetOrBuilder()
public abstract DurationOrBuilder getSpeechEndOffsetOrBuilder()
Time offset of the end of this Speech recognition result relative to the beginning of the audio. Only populated for message_type = TRANSCRIPT.
.google.protobuf.Duration speech_end_offset = 8;
Type | Description |
DurationOrBuilder |
getSpeechWordInfo(int index)
public abstract SpeechWordInfo getSpeechWordInfo(int index)
Word-specific information for the words recognized by Speech in
transcript.
Populated if and only if message_type = TRANSCRIPT and [InputAudioConfig.enable_word_info] is set.
repeated .google.cloud.dialogflow.v2.SpeechWordInfo speech_word_info = 7;
Name | Description |
index |
int |
Type | Description |
SpeechWordInfo |
getSpeechWordInfoCount()
public abstract int getSpeechWordInfoCount()
Word-specific information for the words recognized by Speech in
transcript.
Populated if and only if message_type = TRANSCRIPT and [InputAudioConfig.enable_word_info] is set.
repeated .google.cloud.dialogflow.v2.SpeechWordInfo speech_word_info = 7;
Type | Description |
int |
getSpeechWordInfoList()
public abstract List<SpeechWordInfo> getSpeechWordInfoList()
Word-specific information for the words recognized by Speech in
transcript.
Populated if and only if message_type = TRANSCRIPT and [InputAudioConfig.enable_word_info] is set.
repeated .google.cloud.dialogflow.v2.SpeechWordInfo speech_word_info = 7;
Type | Description |
List<SpeechWordInfo> |
getSpeechWordInfoOrBuilder(int index)
public abstract SpeechWordInfoOrBuilder getSpeechWordInfoOrBuilder(int index)
Word-specific information for the words recognized by Speech in
transcript.
Populated if and only if message_type = TRANSCRIPT and [InputAudioConfig.enable_word_info] is set.
repeated .google.cloud.dialogflow.v2.SpeechWordInfo speech_word_info = 7;
Name | Description |
index |
int |
Type | Description |
SpeechWordInfoOrBuilder |
getSpeechWordInfoOrBuilderList()
public abstract List<? extends SpeechWordInfoOrBuilder> getSpeechWordInfoOrBuilderList()
Word-specific information for the words recognized by Speech in
transcript.
Populated if and only if message_type = TRANSCRIPT and [InputAudioConfig.enable_word_info] is set.
repeated .google.cloud.dialogflow.v2.SpeechWordInfo speech_word_info = 7;
Type | Description |
List<? extends com.google.cloud.dialogflow.v2.SpeechWordInfoOrBuilder> |
getTranscript()
public abstract String getTranscript()
Transcript text representing the words that the user spoke.
Populated if and only if message_type = TRANSCRIPT.
string transcript = 2;
Type | Description |
String |
The transcript. |
getTranscriptBytes()
public abstract ByteString getTranscriptBytes()
Transcript text representing the words that the user spoke.
Populated if and only if message_type = TRANSCRIPT.
string transcript = 2;
Type | Description |
ByteString |
The bytes for transcript. |
hasSpeechEndOffset()
public abstract boolean hasSpeechEndOffset()
Time offset of the end of this Speech recognition result relative to the
beginning of the audio. Only populated for message_type = TRANSCRIPT.
.google.protobuf.Duration speech_end_offset = 8;
Type | Description |
boolean |
Whether the speechEndOffset field is set. |