Reference documentation and code samples for the Cloud Speech-to-Text V2 API class Google::Cloud::Speech::V2::RecognitionConfig.
Provides information to the Recognizer that specifies how to process the recognition request.
Inherits
- Object
Extended By
- Google::Protobuf::MessageExts::ClassMethods
Includes
- Google::Protobuf::MessageExts
Methods
#adaptation
def adaptation() -> ::Google::Cloud::Speech::V2::SpeechAdaptation
- (::Google::Cloud::Speech::V2::SpeechAdaptation) — Speech adaptation context that weights recognizer predictions for specific words and phrases.
#adaptation=
def adaptation=(value) -> ::Google::Cloud::Speech::V2::SpeechAdaptation
- value (::Google::Cloud::Speech::V2::SpeechAdaptation) — Speech adaptation context that weights recognizer predictions for specific words and phrases.
- (::Google::Cloud::Speech::V2::SpeechAdaptation) — Speech adaptation context that weights recognizer predictions for specific words and phrases.
#auto_decoding_config
def auto_decoding_config() -> ::Google::Cloud::Speech::V2::AutoDetectDecodingConfig
- (::Google::Cloud::Speech::V2::AutoDetectDecodingConfig) — Automatically detect decoding parameters. Preferred for supported formats.
#auto_decoding_config=
def auto_decoding_config=(value) -> ::Google::Cloud::Speech::V2::AutoDetectDecodingConfig
- value (::Google::Cloud::Speech::V2::AutoDetectDecodingConfig) — Automatically detect decoding parameters. Preferred for supported formats.
- (::Google::Cloud::Speech::V2::AutoDetectDecodingConfig) — Automatically detect decoding parameters. Preferred for supported formats.
#explicit_decoding_config
def explicit_decoding_config() -> ::Google::Cloud::Speech::V2::ExplicitDecodingConfig
- (::Google::Cloud::Speech::V2::ExplicitDecodingConfig) — Explicitly specified decoding parameters. Required if using headerless PCM audio (linear16, mulaw, alaw).
#explicit_decoding_config=
def explicit_decoding_config=(value) -> ::Google::Cloud::Speech::V2::ExplicitDecodingConfig
- value (::Google::Cloud::Speech::V2::ExplicitDecodingConfig) — Explicitly specified decoding parameters. Required if using headerless PCM audio (linear16, mulaw, alaw).
- (::Google::Cloud::Speech::V2::ExplicitDecodingConfig) — Explicitly specified decoding parameters. Required if using headerless PCM audio (linear16, mulaw, alaw).
#features
def features() -> ::Google::Cloud::Speech::V2::RecognitionFeatures
- (::Google::Cloud::Speech::V2::RecognitionFeatures) — Speech recognition features to enable.
#features=
def features=(value) -> ::Google::Cloud::Speech::V2::RecognitionFeatures
- value (::Google::Cloud::Speech::V2::RecognitionFeatures) — Speech recognition features to enable.
- (::Google::Cloud::Speech::V2::RecognitionFeatures) — Speech recognition features to enable.
#language_codes
def language_codes() -> ::Array<::String>
-
(::Array<::String>) — Optional. The language of the supplied audio as a
BCP-47 language tag.
Language tags are normalized to BCP-47 before they are used, e.g. "en-us"
becomes "en-US".
Supported languages for each model are listed in the Table of Supported Models.
If additional languages are provided, the recognition result will contain recognition in the most likely language detected. The recognition result will include the language tag of the language detected in the audio.
#language_codes=
def language_codes=(value) -> ::Array<::String>
-
value (::Array<::String>) — Optional. The language of the supplied audio as a
BCP-47 language tag.
Language tags are normalized to BCP-47 before they are used, e.g. "en-us"
becomes "en-US".
Supported languages for each model are listed in the Table of Supported Models.
If additional languages are provided, the recognition result will contain recognition in the most likely language detected. The recognition result will include the language tag of the language detected in the audio.
-
(::Array<::String>) — Optional. The language of the supplied audio as a
BCP-47 language tag.
Language tags are normalized to BCP-47 before they are used, e.g. "en-us"
becomes "en-US".
Supported languages for each model are listed in the Table of Supported Models.
If additional languages are provided, the recognition result will contain recognition in the most likely language detected. The recognition result will include the language tag of the language detected in the audio.
#model
def model() -> ::String
-
(::String) — Optional. Which model to use for recognition requests. Select the model
best suited to your domain to get best results.
Guidance for choosing which model to use can be found in the Transcription Models Documentation and the models supported in each region can be found in the Table Of Supported Models.
#model=
def model=(value) -> ::String
-
value (::String) — Optional. Which model to use for recognition requests. Select the model
best suited to your domain to get best results.
Guidance for choosing which model to use can be found in the Transcription Models Documentation and the models supported in each region can be found in the Table Of Supported Models.
-
(::String) — Optional. Which model to use for recognition requests. Select the model
best suited to your domain to get best results.
Guidance for choosing which model to use can be found in the Transcription Models Documentation and the models supported in each region can be found in the Table Of Supported Models.
#transcript_normalization
def transcript_normalization() -> ::Google::Cloud::Speech::V2::TranscriptNormalization
- (::Google::Cloud::Speech::V2::TranscriptNormalization) — Optional. Use transcription normalization to automatically replace parts of the transcript with phrases of your choosing. For StreamingRecognize, this normalization only applies to stable partial transcripts (stability > 0.8) and final transcripts.
#transcript_normalization=
def transcript_normalization=(value) -> ::Google::Cloud::Speech::V2::TranscriptNormalization
- value (::Google::Cloud::Speech::V2::TranscriptNormalization) — Optional. Use transcription normalization to automatically replace parts of the transcript with phrases of your choosing. For StreamingRecognize, this normalization only applies to stable partial transcripts (stability > 0.8) and final transcripts.
- (::Google::Cloud::Speech::V2::TranscriptNormalization) — Optional. Use transcription normalization to automatically replace parts of the transcript with phrases of your choosing. For StreamingRecognize, this normalization only applies to stable partial transcripts (stability > 0.8) and final transcripts.
#translation_config
def translation_config() -> ::Google::Cloud::Speech::V2::TranslationConfig
- (::Google::Cloud::Speech::V2::TranslationConfig) — Optional. Configuration used to automatically run translation on the given audio to the desired language for supported models.
#translation_config=
def translation_config=(value) -> ::Google::Cloud::Speech::V2::TranslationConfig
- value (::Google::Cloud::Speech::V2::TranslationConfig) — Optional. Configuration used to automatically run translation on the given audio to the desired language for supported models.
- (::Google::Cloud::Speech::V2::TranslationConfig) — Optional. Configuration used to automatically run translation on the given audio to the desired language for supported models.