Variables
Recognizer_State_name, Recognizer_State_value
var (
Recognizer_State_name = map[int32]string{
0: "STATE_UNSPECIFIED",
2: "ACTIVE",
4: "DELETED",
}
Recognizer_State_value = map[string]int32{
"STATE_UNSPECIFIED": 0,
"ACTIVE": 2,
"DELETED": 4,
}
)
Enum value maps for Recognizer_State.
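A minimal sketch of converting between enum numbers and names with these generated maps, assuming the package is imported as speechpb (for example from cloud.google.com/go/speech/apiv2/speechpb) and that the usual Recognizer_ACTIVE constant is generated for this enum:

package main

import (
	"fmt"

	speechpb "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
)

func main() {
	// Enum number to proto name.
	fmt.Println(speechpb.Recognizer_State_name[int32(speechpb.Recognizer_ACTIVE)]) // ACTIVE
	// Proto name to enum number.
	fmt.Println(speechpb.Recognizer_State_value["DELETED"]) // 4
}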
ExplicitDecodingConfig_AudioEncoding_name, ExplicitDecodingConfig_AudioEncoding_value
var (
ExplicitDecodingConfig_AudioEncoding_name = map[int32]string{
0: "AUDIO_ENCODING_UNSPECIFIED",
1: "LINEAR16",
2: "MULAW",
3: "ALAW",
}
ExplicitDecodingConfig_AudioEncoding_value = map[string]int32{
"AUDIO_ENCODING_UNSPECIFIED": 0,
"LINEAR16": 1,
"MULAW": 2,
"ALAW": 3,
}
)
Enum value maps for ExplicitDecodingConfig_AudioEncoding.
RecognitionFeatures_MultiChannelMode_name, RecognitionFeatures_MultiChannelMode_value
var (
RecognitionFeatures_MultiChannelMode_name = map[int32]string{
0: "MULTI_CHANNEL_MODE_UNSPECIFIED",
1: "SEPARATE_RECOGNITION_PER_CHANNEL",
}
RecognitionFeatures_MultiChannelMode_value = map[string]int32{
"MULTI_CHANNEL_MODE_UNSPECIFIED": 0,
"SEPARATE_RECOGNITION_PER_CHANNEL": 1,
}
)
Enum value maps for RecognitionFeatures_MultiChannelMode.
StreamingRecognizeResponse_SpeechEventType_name, StreamingRecognizeResponse_SpeechEventType_value
var (
StreamingRecognizeResponse_SpeechEventType_name = map[int32]string{
0: "SPEECH_EVENT_TYPE_UNSPECIFIED",
1: "END_OF_SINGLE_UTTERANCE",
2: "SPEECH_ACTIVITY_BEGIN",
3: "SPEECH_ACTIVITY_END",
}
StreamingRecognizeResponse_SpeechEventType_value = map[string]int32{
"SPEECH_EVENT_TYPE_UNSPECIFIED": 0,
"END_OF_SINGLE_UTTERANCE": 1,
"SPEECH_ACTIVITY_BEGIN": 2,
"SPEECH_ACTIVITY_END": 3,
}
)
Enum value maps for StreamingRecognizeResponse_SpeechEventType.
CustomClass_State_name, CustomClass_State_value
var (
CustomClass_State_name = map[int32]string{
0: "STATE_UNSPECIFIED",
2: "ACTIVE",
4: "DELETED",
}
CustomClass_State_value = map[string]int32{
"STATE_UNSPECIFIED": 0,
"ACTIVE": 2,
"DELETED": 4,
}
)
Enum value maps for CustomClass_State.
PhraseSet_State_name, PhraseSet_State_value
var (
PhraseSet_State_name = map[int32]string{
0: "STATE_UNSPECIFIED",
2: "ACTIVE",
4: "DELETED",
}
PhraseSet_State_value = map[string]int32{
"STATE_UNSPECIFIED": 0,
"ACTIVE": 2,
"DELETED": 4,
}
)
Enum value maps for PhraseSet_State.
File_google_cloud_speech_v2_cloud_speech_proto
var File_google_cloud_speech_v2_cloud_speech_proto protoreflect.FileDescriptor
Functions
func RegisterSpeechServer
func RegisterSpeechServer(s *grpc.Server, srv SpeechServer)
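A minimal sketch of serving the Speech API with this registration function, assuming the generated UnimplementedSpeechServer type exists alongside SpeechServer (it is not shown in this section) and using a hypothetical listen address:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"

	speechpb "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
)

// server embeds the generated UnimplementedSpeechServer so that RPCs it does
// not override return codes.Unimplemented.
type server struct {
	speechpb.UnimplementedSpeechServer
}

func main() {
	lis, err := net.Listen("tcp", "localhost:50051") // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	speechpb.RegisterSpeechServer(s, &server{})
	if err := s.Serve(lis); err != nil {
		log.Fatal(err)
	}
}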
AutoDetectDecodingConfig
type AutoDetectDecodingConfig struct {
// contains filtered or unexported fields
}
Automatically detected decoding parameters. Supported for the following encodings:
- WAV_LINEAR16: 16-bit signed little-endian PCM samples in a WAV container.
- WAV_MULAW: 8-bit companded mulaw samples in a WAV container.
- WAV_ALAW: 8-bit companded alaw samples in a WAV container.
- RFC4867_5_AMR: AMR frames with an rfc4867.5 header.
- RFC4867_5_AMRWB: AMR-WB frames with an rfc4867.5 header.
- FLAC: FLAC frames in the "native FLAC" container format.
- MP3: MPEG audio frames with optional (ignored) ID3 metadata.
- OGG_OPUS: Opus audio frames in an Ogg container.
- WEBM_OPUS: Opus audio frames in a WebM container.
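A minimal sketch of opting into automatic format detection on a RecognitionConfig, assuming the package is imported as speechpb and that the decoding_config oneof is wrapped by a RecognitionConfig_AutoDecodingConfig type (neither is shown in this section):

package snippets

import (
	speechpb "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
)

// autoDetectConfig returns a RecognitionConfig that asks the service to detect
// the container and encoding of the audio from the list above.
func autoDetectConfig() *speechpb.RecognitionConfig {
	return &speechpb.RecognitionConfig{
		// RecognitionConfig_AutoDecodingConfig is the assumed oneof wrapper type.
		DecodingConfig: &speechpb.RecognitionConfig_AutoDecodingConfig{
			AutoDecodingConfig: &speechpb.AutoDetectDecodingConfig{},
		},
	}
}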
func (*AutoDetectDecodingConfig) Descriptor
func (*AutoDetectDecodingConfig) Descriptor() ([]byte, []int)
Deprecated: Use AutoDetectDecodingConfig.ProtoReflect.Descriptor instead.
func (*AutoDetectDecodingConfig) ProtoMessage
func (*AutoDetectDecodingConfig) ProtoMessage()
func (*AutoDetectDecodingConfig) ProtoReflect
func (x *AutoDetectDecodingConfig) ProtoReflect() protoreflect.Message
func (*AutoDetectDecodingConfig) Reset
func (x *AutoDetectDecodingConfig) Reset()
func (*AutoDetectDecodingConfig) String
func (x *AutoDetectDecodingConfig) String() string
BatchRecognizeFileMetadata
type BatchRecognizeFileMetadata struct {
// The audio source, which is a Google Cloud Storage URI.
//
// Types that are assignable to AudioSource:
//
// *BatchRecognizeFileMetadata_Uri
AudioSource isBatchRecognizeFileMetadata_AudioSource `protobuf_oneof:"audio_source"`
// Features and audio metadata to use for the Automatic Speech Recognition.
// This field in combination with the
// [config_mask][google.cloud.speech.v2.BatchRecognizeFileMetadata.config_mask]
// field can be used to override parts of the
// [default_recognition_config][google.cloud.speech.v2.Recognizer.default_recognition_config]
// of the Recognizer resource as well as the
// [config][google.cloud.speech.v2.BatchRecognizeRequest.config] at the
// request level.
Config *RecognitionConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"`
// The list of fields in
// [config][google.cloud.speech.v2.BatchRecognizeFileMetadata.config] that
// override the values in the
// [default_recognition_config][google.cloud.speech.v2.Recognizer.default_recognition_config]
// of the recognizer during this recognition request. If no mask is provided,
// all non-default valued fields in
// [config][google.cloud.speech.v2.BatchRecognizeFileMetadata.config] override
// the values in the recognizer for this recognition request. If a mask is
// provided, only the fields listed in the mask override the config in the
// recognizer for this recognition request. If a wildcard (`*`) is provided,
// [config][google.cloud.speech.v2.BatchRecognizeFileMetadata.config]
// completely overrides and replaces the config in the recognizer for this
// recognition request.
ConfigMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=config_mask,json=configMask,proto3" json:"config_mask,omitempty"`
// contains filtered or unexported fields
}
Metadata about a single file in a batch for BatchRecognize.
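A minimal sketch of describing one input file for a batch request, using the BatchRecognizeFileMetadata_Uri wrapper shown below and a hypothetical bucket; the LanguageCodes field of RecognitionConfig is assumed and not shown in this section:

package snippets

import (
	"google.golang.org/protobuf/types/known/fieldmaskpb"

	speechpb "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
)

// fileMetadata points the batch at one Cloud Storage object and overrides only
// the language_codes of the recognizer's default_recognition_config.
func fileMetadata() *speechpb.BatchRecognizeFileMetadata {
	return &speechpb.BatchRecognizeFileMetadata{
		AudioSource: &speechpb.BatchRecognizeFileMetadata_Uri{
			Uri: "gs://my-bucket/audio.wav", // hypothetical object
		},
		Config: &speechpb.RecognitionConfig{
			LanguageCodes: []string{"en-US"}, // assumed field, not shown in this section
		},
		ConfigMask: &fieldmaskpb.FieldMask{Paths: []string{"language_codes"}},
	}
}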
func (*BatchRecognizeFileMetadata) Descriptor
func (*BatchRecognizeFileMetadata) Descriptor() ([]byte, []int)
Deprecated: Use BatchRecognizeFileMetadata.ProtoReflect.Descriptor instead.
func (*BatchRecognizeFileMetadata) GetAudioSource
func (m *BatchRecognizeFileMetadata) GetAudioSource() isBatchRecognizeFileMetadata_AudioSource
func (*BatchRecognizeFileMetadata) GetConfig
func (x *BatchRecognizeFileMetadata) GetConfig() *RecognitionConfig
func (*BatchRecognizeFileMetadata) GetConfigMask
func (x *BatchRecognizeFileMetadata) GetConfigMask() *fieldmaskpb.FieldMask
func (*BatchRecognizeFileMetadata) GetUri
func (x *BatchRecognizeFileMetadata) GetUri() string
func (*BatchRecognizeFileMetadata) ProtoMessage
func (*BatchRecognizeFileMetadata) ProtoMessage()
func (*BatchRecognizeFileMetadata) ProtoReflect
func (x *BatchRecognizeFileMetadata) ProtoReflect() protoreflect.Message
func (*BatchRecognizeFileMetadata) Reset
func (x *BatchRecognizeFileMetadata) Reset()
func (*BatchRecognizeFileMetadata) String
func (x *BatchRecognizeFileMetadata) String() string
BatchRecognizeFileMetadata_Uri
type BatchRecognizeFileMetadata_Uri struct {
// Cloud Storage URI for the audio file.
Uri string `protobuf:"bytes,1,opt,name=uri,proto3,oneof"`
}
BatchRecognizeFileResult
type BatchRecognizeFileResult struct {
// The Cloud Storage URI to which recognition results were written.
Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"`
// Error if one was encountered.
Error *status.Status `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
// contains filtered or unexported fields
}
Final results for a single file.
func (*BatchRecognizeFileResult) Descriptor
func (*BatchRecognizeFileResult) Descriptor() ([]byte, []int)
Deprecated: Use BatchRecognizeFileResult.ProtoReflect.Descriptor instead.
func (*BatchRecognizeFileResult) GetError
func (x *BatchRecognizeFileResult) GetError() *status.Status
func (*BatchRecognizeFileResult) GetUri
func (x *BatchRecognizeFileResult) GetUri() string
func (*BatchRecognizeFileResult) ProtoMessage
func (*BatchRecognizeFileResult) ProtoMessage()
func (*BatchRecognizeFileResult) ProtoReflect
func (x *BatchRecognizeFileResult) ProtoReflect() protoreflect.Message
func (*BatchRecognizeFileResult) Reset
func (x *BatchRecognizeFileResult) Reset()
func (*BatchRecognizeFileResult) String
func (x *BatchRecognizeFileResult) String() string
BatchRecognizeMetadata
type BatchRecognizeMetadata struct {
TranscriptionMetadata map[string]*BatchRecognizeTranscriptionMetadata "" /* 212 byte string literal not displayed */
}
Operation metadata for [BatchRecognize][google.cloud.speech.v2.Speech.BatchRecognize].
func (*BatchRecognizeMetadata) Descriptor
func (*BatchRecognizeMetadata) Descriptor() ([]byte, []int)
Deprecated: Use BatchRecognizeMetadata.ProtoReflect.Descriptor instead.
func (*BatchRecognizeMetadata) GetTranscriptionMetadata
func (x *BatchRecognizeMetadata) GetTranscriptionMetadata() map[string]*BatchRecognizeTranscriptionMetadata
func (*BatchRecognizeMetadata) ProtoMessage
func (*BatchRecognizeMetadata) ProtoMessage()
func (*BatchRecognizeMetadata) ProtoReflect
func (x *BatchRecognizeMetadata) ProtoReflect() protoreflect.Message
func (*BatchRecognizeMetadata) Reset
func (x *BatchRecognizeMetadata) Reset()
func (*BatchRecognizeMetadata) String
func (x *BatchRecognizeMetadata) String() string
BatchRecognizeRequest
type BatchRecognizeRequest struct {
// Required. Resource name of the recognizer to be used for ASR.
Recognizer string `protobuf:"bytes,1,opt,name=recognizer,proto3" json:"recognizer,omitempty"`
// Features and audio metadata to use for the Automatic Speech Recognition.
// This field in combination with the
// [config_mask][google.cloud.speech.v2.BatchRecognizeRequest.config_mask]
// field can be used to override parts of the
// [default_recognition_config][google.cloud.speech.v2.Recognizer.default_recognition_config]
// of the Recognizer resource.
Config *RecognitionConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"`
// The list of fields in
// [config][google.cloud.speech.v2.BatchRecognizeRequest.config] that override
// the values in the
// [default_recognition_config][google.cloud.speech.v2.Recognizer.default_recognition_config]
// of the recognizer during this recognition request. If no mask is provided,
// all given fields in
// [config][google.cloud.speech.v2.BatchRecognizeRequest.config] override the
// values in the recognizer for this recognition request. If a mask is
// provided, only the fields listed in the mask override the config in the
// recognizer for this recognition request. If a wildcard (`*`) is provided,
// [config][google.cloud.speech.v2.BatchRecognizeRequest.config] completely
// overrides and replaces the config in the recognizer for this recognition
// request.
ConfigMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=config_mask,json=configMask,proto3" json:"config_mask,omitempty"`
// Audio files with file metadata for ASR.
Files []*BatchRecognizeFileMetadata `protobuf:"bytes,3,rep,name=files,proto3" json:"files,omitempty"`
// contains filtered or unexported fields
}
Request message for the [BatchRecognize][google.cloud.speech.v2.Speech.BatchRecognize] method.
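A minimal sketch of assembling the request for a batch of files, with a hypothetical recognizer resource name; leaving config and config_mask unset runs ASR with the recognizer's default_recognition_config:

package snippets

import (
	speechpb "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
)

// batchRequest sends the given files to one recognizer for asynchronous
// transcription via BatchRecognize.
func batchRequest(files []*speechpb.BatchRecognizeFileMetadata) *speechpb.BatchRecognizeRequest {
	return &speechpb.BatchRecognizeRequest{
		Recognizer: "projects/my-project/locations/global/recognizers/my-recognizer", // hypothetical
		Files:      files,
	}
}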
func (*BatchRecognizeRequest) Descriptor
func (*BatchRecognizeRequest) Descriptor() ([]byte, []int)
Deprecated: Use BatchRecognizeRequest.ProtoReflect.Descriptor instead.
func (*BatchRecognizeRequest) GetConfig
func (x *BatchRecognizeRequest) GetConfig() *RecognitionConfig
func (*BatchRecognizeRequest) GetConfigMask
func (x *BatchRecognizeRequest) GetConfigMask() *fieldmaskpb.FieldMask
func (*BatchRecognizeRequest) GetFiles
func (x *BatchRecognizeRequest) GetFiles() []*BatchRecognizeFileMetadata
func (*BatchRecognizeRequest) GetRecognizer
func (x *BatchRecognizeRequest) GetRecognizer() string
func (*BatchRecognizeRequest) ProtoMessage
func (*BatchRecognizeRequest) ProtoMessage()
func (*BatchRecognizeRequest) ProtoReflect
func (x *BatchRecognizeRequest) ProtoReflect() protoreflect.Message
func (*BatchRecognizeRequest) Reset
func (x *BatchRecognizeRequest) Reset()
func (*BatchRecognizeRequest) String
func (x *BatchRecognizeRequest) String() string
BatchRecognizeResponse
type BatchRecognizeResponse struct {
Results map[string]*BatchRecognizeFileResult "" /* 155 byte string literal not displayed */
}
Response message for [BatchRecognize][google.cloud.speech.v2.Speech.BatchRecognize] that is packaged into a longrunning [Operation][google.longrunning.Operation].
func (*BatchRecognizeResponse) Descriptor
func (*BatchRecognizeResponse) Descriptor() ([]byte, []int)
Deprecated: Use BatchRecognizeResponse.ProtoReflect.Descriptor instead.
func (*BatchRecognizeResponse) GetResults
func (x *BatchRecognizeResponse) GetResults() map[string]*BatchRecognizeFileResult
func (*BatchRecognizeResponse) ProtoMessage
func (*BatchRecognizeResponse) ProtoMessage()
func (*BatchRecognizeResponse) ProtoReflect
func (x *BatchRecognizeResponse) ProtoReflect() protoreflect.Message
func (*BatchRecognizeResponse) Reset
func (x *BatchRecognizeResponse) Reset()
func (*BatchRecognizeResponse) String
func (x *BatchRecognizeResponse) String() string
BatchRecognizeTranscriptionMetadata
type BatchRecognizeTranscriptionMetadata struct {
// How much of the file has been transcribed so far.
ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
// Error if one was encountered.
Error *status.Status `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
// The Cloud Storage URI to which recognition results will be written.
Uri string `protobuf:"bytes,3,opt,name=uri,proto3" json:"uri,omitempty"`
// contains filtered or unexported fields
}
Metadata about transcription for a single file (for example, progress percent).
func (*BatchRecognizeTranscriptionMetadata) Descriptor
func (*BatchRecognizeTranscriptionMetadata) Descriptor() ([]byte, []int)
Deprecated: Use BatchRecognizeTranscriptionMetadata.ProtoReflect.Descriptor instead.
func (*BatchRecognizeTranscriptionMetadata) GetError
func (x *BatchRecognizeTranscriptionMetadata) GetError() *status.Status
func (*BatchRecognizeTranscriptionMetadata) GetProgressPercent
func (x *BatchRecognizeTranscriptionMetadata) GetProgressPercent() int32
func (*BatchRecognizeTranscriptionMetadata) GetUri
func (x *BatchRecognizeTranscriptionMetadata) GetUri() string
func (*BatchRecognizeTranscriptionMetadata) ProtoMessage
func (*BatchRecognizeTranscriptionMetadata) ProtoMessage()
func (*BatchRecognizeTranscriptionMetadata) ProtoReflect
func (x *BatchRecognizeTranscriptionMetadata) ProtoReflect() protoreflect.Message
func (*BatchRecognizeTranscriptionMetadata) Reset
func (x *BatchRecognizeTranscriptionMetadata) Reset()
func (*BatchRecognizeTranscriptionMetadata) String
func (x *BatchRecognizeTranscriptionMetadata) String() string
Config
type Config struct {
// Output only. The name of the config resource. There is exactly one config
// resource per project per location. The expected format is
// `projects/{project}/locations/{location}/config`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Optional. An optional [KMS key
// name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) that if
// present, will be used to encrypt Speech-to-Text resources at-rest. Updating
// this key will not encrypt existing resources using this key; only new
// resources will be encrypted using this key. The expected format is
// `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`.
KmsKeyName string `protobuf:"bytes,2,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
// Output only. The most recent time this resource was modified.
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// contains filtered or unexported fields
}
Message representing the config for the Speech-to-Text API. This includes an optional KMS key with which incoming data will be encrypted.
func (*Config) Descriptor
Deprecated: Use Config.ProtoReflect.Descriptor instead.
func (*Config) GetKmsKeyName
func (*Config) GetName
func (*Config) GetUpdateTime
func (x *Config) GetUpdateTime() *timestamppb.Timestamp
func (*Config) ProtoMessage
func (*Config) ProtoMessage()
func (*Config) ProtoReflect
func (x *Config) ProtoReflect() protoreflect.Message
func (*Config) Reset
func (x *Config) Reset()
func (*Config) String
CreateCustomClassRequest
type CreateCustomClassRequest struct {
// Required. The CustomClass to create.
CustomClass *CustomClass `protobuf:"bytes,1,opt,name=custom_class,json=customClass,proto3" json:"custom_class,omitempty"`
// If set, validate the request and preview the CustomClass, but do not
// actually create it.
ValidateOnly bool `protobuf:"varint,2,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// The ID to use for the CustomClass, which will become the final component of
// the CustomClass's resource name.
//
// This value should be 4-63 characters, and valid characters
// are /[a-z][0-9]-/.
CustomClassId string `protobuf:"bytes,3,opt,name=custom_class_id,json=customClassId,proto3" json:"custom_class_id,omitempty"`
// Required. The project and location where this CustomClass will be created.
// The expected format is `projects/{project}/locations/{location}`.
Parent string `protobuf:"bytes,4,opt,name=parent,proto3" json:"parent,omitempty"`
// contains filtered or unexported fields
}
Request message for the [CreateCustomClass][google.cloud.speech.v2.Speech.CreateCustomClass] method.
func (*CreateCustomClassRequest) Descriptor
func (*CreateCustomClassRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateCustomClassRequest.ProtoReflect.Descriptor instead.
func (*CreateCustomClassRequest) GetCustomClass
func (x *CreateCustomClassRequest) GetCustomClass() *CustomClass
func (*CreateCustomClassRequest) GetCustomClassId
func (x *CreateCustomClassRequest) GetCustomClassId() string
func (*CreateCustomClassRequest) GetParent
func (x *CreateCustomClassRequest) GetParent() string
func (*CreateCustomClassRequest) GetValidateOnly
func (x *CreateCustomClassRequest) GetValidateOnly() bool
func (*CreateCustomClassRequest) ProtoMessage
func (*CreateCustomClassRequest) ProtoMessage()
func (*CreateCustomClassRequest) ProtoReflect
func (x *CreateCustomClassRequest) ProtoReflect() protoreflect.Message
func (*CreateCustomClassRequest) Reset
func (x *CreateCustomClassRequest) Reset()
func (*CreateCustomClassRequest) String
func (x *CreateCustomClassRequest) String() string
CreatePhraseSetRequest
type CreatePhraseSetRequest struct {
// Required. The PhraseSet to create.
PhraseSet *PhraseSet `protobuf:"bytes,1,opt,name=phrase_set,json=phraseSet,proto3" json:"phrase_set,omitempty"`
// If set, validate the request and preview the PhraseSet, but do not
// actually create it.
ValidateOnly bool `protobuf:"varint,2,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// The ID to use for the PhraseSet, which will become the final component of
// the PhraseSet's resource name.
//
// This value should be 4-63 characters, and valid characters
// are /[a-z][0-9]-/.
PhraseSetId string `protobuf:"bytes,3,opt,name=phrase_set_id,json=phraseSetId,proto3" json:"phrase_set_id,omitempty"`
// Required. The project and location where this PhraseSet will be created.
// The expected format is `projects/{project}/locations/{location}`.
Parent string `protobuf:"bytes,4,opt,name=parent,proto3" json:"parent,omitempty"`
// contains filtered or unexported fields
}
Request message for the [CreatePhraseSet][google.cloud.speech.v2.Speech.CreatePhraseSet] method.
func (*CreatePhraseSetRequest) Descriptor
func (*CreatePhraseSetRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreatePhraseSetRequest.ProtoReflect.Descriptor instead.
func (*CreatePhraseSetRequest) GetParent
func (x *CreatePhraseSetRequest) GetParent() string
func (*CreatePhraseSetRequest) GetPhraseSet
func (x *CreatePhraseSetRequest) GetPhraseSet() *PhraseSet
func (*CreatePhraseSetRequest) GetPhraseSetId
func (x *CreatePhraseSetRequest) GetPhraseSetId() string
func (*CreatePhraseSetRequest) GetValidateOnly
func (x *CreatePhraseSetRequest) GetValidateOnly() bool
func (*CreatePhraseSetRequest) ProtoMessage
func (*CreatePhraseSetRequest) ProtoMessage()
func (*CreatePhraseSetRequest) ProtoReflect
func (x *CreatePhraseSetRequest) ProtoReflect() protoreflect.Message
func (*CreatePhraseSetRequest) Reset
func (x *CreatePhraseSetRequest) Reset()
func (*CreatePhraseSetRequest) String
func (x *CreatePhraseSetRequest) String() string
CreateRecognizerRequest
type CreateRecognizerRequest struct {
// Required. The Recognizer to create.
Recognizer *Recognizer `protobuf:"bytes,1,opt,name=recognizer,proto3" json:"recognizer,omitempty"`
// If set, validate the request and preview the Recognizer, but do not
// actually create it.
ValidateOnly bool `protobuf:"varint,2,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// The ID to use for the Recognizer, which will become the final component of
// the Recognizer's resource name.
//
// This value should be 4-63 characters, and valid characters
// are /[a-z][0-9]-/.
RecognizerId string `protobuf:"bytes,3,opt,name=recognizer_id,json=recognizerId,proto3" json:"recognizer_id,omitempty"`
// Required. The project and location where this Recognizer will be created.
// The expected format is `projects/{project}/locations/{location}`.
Parent string `protobuf:"bytes,4,opt,name=parent,proto3" json:"parent,omitempty"`
// contains filtered or unexported fields
}
Request message for the [CreateRecognizer][google.cloud.speech.v2.Speech.CreateRecognizer] method.
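A minimal sketch of a create request, with a hypothetical parent and ID; the Recognizer message itself is not shown in this section, so its DisplayName field is an assumption:

package snippets

import (
	speechpb "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
)

// createRecognizerRequest previews creation of {parent}/recognizers/{recognizer_id}
// without actually creating it, because ValidateOnly is set.
func createRecognizerRequest() *speechpb.CreateRecognizerRequest {
	return &speechpb.CreateRecognizerRequest{
		Parent:       "projects/my-project/locations/global", // hypothetical
		RecognizerId: "my-recognizer",                         // 4-63 chars of [a-z][0-9]-
		Recognizer: &speechpb.Recognizer{
			DisplayName: "Example recognizer", // assumed field, not shown in this section
		},
		ValidateOnly: true,
	}
}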
func (*CreateRecognizerRequest) Descriptor
func (*CreateRecognizerRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateRecognizerRequest.ProtoReflect.Descriptor instead.
func (*CreateRecognizerRequest) GetParent
func (x *CreateRecognizerRequest) GetParent() string
func (*CreateRecognizerRequest) GetRecognizer
func (x *CreateRecognizerRequest) GetRecognizer() *Recognizer
func (*CreateRecognizerRequest) GetRecognizerId
func (x *CreateRecognizerRequest) GetRecognizerId() string
func (*CreateRecognizerRequest) GetValidateOnly
func (x *CreateRecognizerRequest) GetValidateOnly() bool
func (*CreateRecognizerRequest) ProtoMessage
func (*CreateRecognizerRequest) ProtoMessage()
func (*CreateRecognizerRequest) ProtoReflect
func (x *CreateRecognizerRequest) ProtoReflect() protoreflect.Message
func (*CreateRecognizerRequest) Reset
func (x *CreateRecognizerRequest) Reset()
func (*CreateRecognizerRequest) String
func (x *CreateRecognizerRequest) String() string
CustomClass
type CustomClass struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Uid string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"`
DisplayName string `protobuf:"bytes,4,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
Items []*CustomClass_ClassItem `protobuf:"bytes,5,rep,name=items,proto3" json:"items,omitempty"`
State CustomClass_State `protobuf:"varint,15,opt,name=state,proto3,enum=google.cloud.speech.v2.CustomClass_State" json:"state,omitempty"`
CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
DeleteTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"`
ExpireTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
Annotations map[string]string "" /* 164 byte string literal not displayed */
Etag string `protobuf:"bytes,11,opt,name=etag,proto3" json:"etag,omitempty"`
Reconciling bool `protobuf:"varint,12,opt,name=reconciling,proto3" json:"reconciling,omitempty"`
KmsKeyName string `protobuf:"bytes,13,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
KmsKeyVersionName string `protobuf:"bytes,14,opt,name=kms_key_version_name,json=kmsKeyVersionName,proto3" json:"kms_key_version_name,omitempty"`
}
CustomClass for biasing in speech recognition. Used to define a set of words or phrases that represents a common concept or theme likely to appear in your audio, for example, a list of passenger ship names.
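A minimal sketch of such a class, populating only the DisplayName and Items fields shown above with hypothetical ship names:

package snippets

import (
	speechpb "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
)

// shipNames defines a CustomClass whose items bias recognition toward a small
// list of passenger ship names.
func shipNames() *speechpb.CustomClass {
	return &speechpb.CustomClass{
		DisplayName: "passenger-ships",
		Items: []*speechpb.CustomClass_ClassItem{
			{Value: "Queen Mary"},
			{Value: "Queen Elizabeth"},
		},
	}
}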
func (*CustomClass) Descriptor
func (*CustomClass) Descriptor() ([]byte, []int)
Deprecated: Use CustomClass.ProtoReflect.Descriptor instead.
func (*CustomClass) GetAnnotations
func (x *CustomClass) GetAnnotations() map[string]string
func (*CustomClass) GetCreateTime
func (x *CustomClass) GetCreateTime() *timestamppb.Timestamp
func (*CustomClass) GetDeleteTime
func (x *CustomClass) GetDeleteTime() *timestamppb.Timestamp
func (*CustomClass) GetDisplayName
func (x *CustomClass) GetDisplayName() string
func (*CustomClass) GetEtag
func (x *CustomClass) GetEtag() string
func (*CustomClass) GetExpireTime
func (x *CustomClass) GetExpireTime() *timestamppb.Timestamp
func (*CustomClass) GetItems
func (x *CustomClass) GetItems() []*CustomClass_ClassItem
func (*CustomClass) GetKmsKeyName
func (x *CustomClass) GetKmsKeyName() string
func (*CustomClass) GetKmsKeyVersionName
func (x *CustomClass) GetKmsKeyVersionName() string
func (*CustomClass) GetName
func (x *CustomClass) GetName() string
func (*CustomClass) GetReconciling
func (x *CustomClass) GetReconciling() bool
func (*CustomClass) GetState
func (x *CustomClass) GetState() CustomClass_State
func (*CustomClass) GetUid
func (x *CustomClass) GetUid() string
func (*CustomClass) GetUpdateTime
func (x *CustomClass) GetUpdateTime() *timestamppb.Timestamp
func (*CustomClass) ProtoMessage
func (*CustomClass) ProtoMessage()
func (*CustomClass) ProtoReflect
func (x *CustomClass) ProtoReflect() protoreflect.Message
func (*CustomClass) Reset
func (x *CustomClass) Reset()
func (*CustomClass) String
func (x *CustomClass) String() string
CustomClass_ClassItem
type CustomClass_ClassItem struct {
// The class item's value.
Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
// contains filtered or unexported fields
}
An item of the class.
func (*CustomClass_ClassItem) Descriptor
func (*CustomClass_ClassItem) Descriptor() ([]byte, []int)
Deprecated: Use CustomClass_ClassItem.ProtoReflect.Descriptor instead.
func (*CustomClass_ClassItem) GetValue
func (x *CustomClass_ClassItem) GetValue() string
func (*CustomClass_ClassItem) ProtoMessage
func (*CustomClass_ClassItem) ProtoMessage()
func (*CustomClass_ClassItem) ProtoReflect
func (x *CustomClass_ClassItem) ProtoReflect() protoreflect.Message
func (*CustomClass_ClassItem) Reset
func (x *CustomClass_ClassItem) Reset()
func (*CustomClass_ClassItem) String
func (x *CustomClass_ClassItem) String() string
CustomClass_State
type CustomClass_State int32
Set of states that define the lifecycle of a CustomClass.
CustomClass_STATE_UNSPECIFIED, CustomClass_ACTIVE, CustomClass_DELETED
const (
// Unspecified state. This is only used/useful for distinguishing
// unset values.
CustomClass_STATE_UNSPECIFIED CustomClass_State = 0
// The normal and active state.
CustomClass_ACTIVE CustomClass_State = 2
// This CustomClass has been deleted.
CustomClass_DELETED CustomClass_State = 4
)
func (CustomClass_State) Descriptor
func (CustomClass_State) Descriptor() protoreflect.EnumDescriptor
func (CustomClass_State) Enum
func (x CustomClass_State) Enum() *CustomClass_State
func (CustomClass_State) EnumDescriptor
func (CustomClass_State) EnumDescriptor() ([]byte, []int)
Deprecated: Use CustomClass_State.Descriptor instead.
func (CustomClass_State) Number
func (x CustomClass_State) Number() protoreflect.EnumNumber
func (CustomClass_State) String
func (x CustomClass_State) String() string
func (CustomClass_State) Type
func (CustomClass_State) Type() protoreflect.EnumType
DeleteCustomClassRequest
type DeleteCustomClassRequest struct {
// Required. The name of the CustomClass to delete.
// Format:
// `projects/{project}/locations/{location}/customClasses/{custom_class}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// If set, validate the request and preview the deleted CustomClass, but do
// not actually delete it.
ValidateOnly bool `protobuf:"varint,2,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// If set to true, and the CustomClass is not found, the request will succeed
// and be a no-op (no Operation is recorded in this case).
AllowMissing bool `protobuf:"varint,4,opt,name=allow_missing,json=allowMissing,proto3" json:"allow_missing,omitempty"`
// This checksum is computed by the server based on the value of other
// fields. This may be sent on update, undelete, and delete requests to ensure
// the client has an up-to-date value before proceeding.
Etag string `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"`
// contains filtered or unexported fields
}
Request message for the [DeleteCustomClass][google.cloud.speech.v2.Speech.DeleteCustomClass] method.
func (*DeleteCustomClassRequest) Descriptor
func (*DeleteCustomClassRequest) Descriptor() ([]byte, []int)
Deprecated: Use DeleteCustomClassRequest.ProtoReflect.Descriptor instead.
func (*DeleteCustomClassRequest) GetAllowMissing
func (x *DeleteCustomClassRequest) GetAllowMissing() bool
func (*DeleteCustomClassRequest) GetEtag
func (x *DeleteCustomClassRequest) GetEtag() string
func (*DeleteCustomClassRequest) GetName
func (x *DeleteCustomClassRequest) GetName() string
func (*DeleteCustomClassRequest) GetValidateOnly
func (x *DeleteCustomClassRequest) GetValidateOnly() bool
func (*DeleteCustomClassRequest) ProtoMessage
func (*DeleteCustomClassRequest) ProtoMessage()
func (*DeleteCustomClassRequest) ProtoReflect
func (x *DeleteCustomClassRequest) ProtoReflect() protoreflect.Message
func (*DeleteCustomClassRequest) Reset
func (x *DeleteCustomClassRequest) Reset()
func (*DeleteCustomClassRequest) String
func (x *DeleteCustomClassRequest) String() string
DeletePhraseSetRequest
type DeletePhraseSetRequest struct {
// Required. The name of the PhraseSet to delete.
// Format: `projects/{project}/locations/{location}/phraseSets/{phrase_set}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// If set, validate the request and preview the deleted PhraseSet, but do not
// actually delete it.
ValidateOnly bool `protobuf:"varint,2,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// If set to true, and the PhraseSet is not found, the request will succeed
// and be a no-op (no Operation is recorded in this case).
AllowMissing bool `protobuf:"varint,4,opt,name=allow_missing,json=allowMissing,proto3" json:"allow_missing,omitempty"`
// This checksum is computed by the server based on the value of other
// fields. This may be sent on update, undelete, and delete requests to ensure
// the client has an up-to-date value before proceeding.
Etag string `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"`
// contains filtered or unexported fields
}
Request message for the [DeletePhraseSet][google.cloud.speech.v2.Speech.DeletePhraseSet] method.
func (*DeletePhraseSetRequest) Descriptor
func (*DeletePhraseSetRequest) Descriptor() ([]byte, []int)
Deprecated: Use DeletePhraseSetRequest.ProtoReflect.Descriptor instead.
func (*DeletePhraseSetRequest) GetAllowMissing
func (x *DeletePhraseSetRequest) GetAllowMissing() bool
func (*DeletePhraseSetRequest) GetEtag
func (x *DeletePhraseSetRequest) GetEtag() string
func (*DeletePhraseSetRequest) GetName
func (x *DeletePhraseSetRequest) GetName() string
func (*DeletePhraseSetRequest) GetValidateOnly
func (x *DeletePhraseSetRequest) GetValidateOnly() bool
func (*DeletePhraseSetRequest) ProtoMessage
func (*DeletePhraseSetRequest) ProtoMessage()
func (*DeletePhraseSetRequest) ProtoReflect
func (x *DeletePhraseSetRequest) ProtoReflect() protoreflect.Message
func (*DeletePhraseSetRequest) Reset
func (x *DeletePhraseSetRequest) Reset()
func (*DeletePhraseSetRequest) String
func (x *DeletePhraseSetRequest) String() string
DeleteRecognizerRequest
type DeleteRecognizerRequest struct {
// Required. The name of the Recognizer to delete.
// Format: `projects/{project}/locations/{location}/recognizers/{recognizer}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// If set, validate the request and preview the deleted Recognizer, but do not
// actually delete it.
ValidateOnly bool `protobuf:"varint,2,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// If set to true, and the Recognizer is not found, the request will succeed
// and be a no-op (no Operation is recorded in this case).
AllowMissing bool `protobuf:"varint,4,opt,name=allow_missing,json=allowMissing,proto3" json:"allow_missing,omitempty"`
// This checksum is computed by the server based on the value of other
// fields. This may be sent on update, undelete, and delete requests to ensure
// the client has an up-to-date value before proceeding.
Etag string `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"`
// contains filtered or unexported fields
}
Request message for the [DeleteRecognizer][google.cloud.speech.v2.Speech.DeleteRecognizer] method.
func (*DeleteRecognizerRequest) Descriptor
func (*DeleteRecognizerRequest) Descriptor() ([]byte, []int)
Deprecated: Use DeleteRecognizerRequest.ProtoReflect.Descriptor instead.
func (*DeleteRecognizerRequest) GetAllowMissing
func (x *DeleteRecognizerRequest) GetAllowMissing() bool
func (*DeleteRecognizerRequest) GetEtag
func (x *DeleteRecognizerRequest) GetEtag() string
func (*DeleteRecognizerRequest) GetName
func (x *DeleteRecognizerRequest) GetName() string
func (*DeleteRecognizerRequest) GetValidateOnly
func (x *DeleteRecognizerRequest) GetValidateOnly() bool
func (*DeleteRecognizerRequest) ProtoMessage
func (*DeleteRecognizerRequest) ProtoMessage()
func (*DeleteRecognizerRequest) ProtoReflect
func (x *DeleteRecognizerRequest) ProtoReflect() protoreflect.Message
func (*DeleteRecognizerRequest) Reset
func (x *DeleteRecognizerRequest) Reset()
func (*DeleteRecognizerRequest) String
func (x *DeleteRecognizerRequest) String() string
ExplicitDecodingConfig
type ExplicitDecodingConfig struct {
Encoding ExplicitDecodingConfig_AudioEncoding "" /* 135 byte string literal not displayed */
SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
AudioChannelCount int32 `protobuf:"varint,3,opt,name=audio_channel_count,json=audioChannelCount,proto3" json:"audio_channel_count,omitempty"`
}
Explicitly specified decoding parameters.
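A minimal sketch of explicit decoding parameters for headerless 16-bit PCM sampled at 16 kHz with one channel, using the encoding enum defined below:

package snippets

import (
	speechpb "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
)

// linear16Mono describes raw LINEAR16 audio so the service does not need to
// detect the format from a container header.
func linear16Mono() *speechpb.ExplicitDecodingConfig {
	return &speechpb.ExplicitDecodingConfig{
		Encoding:          speechpb.ExplicitDecodingConfig_LINEAR16,
		SampleRateHertz:   16000,
		AudioChannelCount: 1,
	}
}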
func (*ExplicitDecodingConfig) Descriptor
func (*ExplicitDecodingConfig) Descriptor() ([]byte, []int)
Deprecated: Use ExplicitDecodingConfig.ProtoReflect.Descriptor instead.
func (*ExplicitDecodingConfig) GetAudioChannelCount
func (x *ExplicitDecodingConfig) GetAudioChannelCount() int32
func (*ExplicitDecodingConfig) GetEncoding
func (x *ExplicitDecodingConfig) GetEncoding() ExplicitDecodingConfig_AudioEncoding
func (*ExplicitDecodingConfig) GetSampleRateHertz
func (x *ExplicitDecodingConfig) GetSampleRateHertz() int32
func (*ExplicitDecodingConfig) ProtoMessage
func (*ExplicitDecodingConfig) ProtoMessage()
func (*ExplicitDecodingConfig) ProtoReflect
func (x *ExplicitDecodingConfig) ProtoReflect() protoreflect.Message
func (*ExplicitDecodingConfig) Reset
func (x *ExplicitDecodingConfig) Reset()
func (*ExplicitDecodingConfig) String
func (x *ExplicitDecodingConfig) String() string
ExplicitDecodingConfig_AudioEncoding
type ExplicitDecodingConfig_AudioEncoding int32
Supported audio data encodings.
ExplicitDecodingConfig_AUDIO_ENCODING_UNSPECIFIED, ExplicitDecodingConfig_LINEAR16, ExplicitDecodingConfig_MULAW, ExplicitDecodingConfig_ALAW
const (
// Default value. This value is unused.
ExplicitDecodingConfig_AUDIO_ENCODING_UNSPECIFIED ExplicitDecodingConfig_AudioEncoding = 0
// Headerless 16-bit signed little-endian PCM samples.
ExplicitDecodingConfig_LINEAR16 ExplicitDecodingConfig_AudioEncoding = 1
// Headerless 8-bit companded mulaw samples.
ExplicitDecodingConfig_MULAW ExplicitDecodingConfig_AudioEncoding = 2
// Headerless 8-bit companded alaw samples.
ExplicitDecodingConfig_ALAW ExplicitDecodingConfig_AudioEncoding = 3
)
func (ExplicitDecodingConfig_AudioEncoding) Descriptor
func (ExplicitDecodingConfig_AudioEncoding) Descriptor() protoreflect.EnumDescriptor
func (ExplicitDecodingConfig_AudioEncoding) Enum
func (x ExplicitDecodingConfig_AudioEncoding) Enum() *ExplicitDecodingConfig_AudioEncoding
func (ExplicitDecodingConfig_AudioEncoding) EnumDescriptor
func (ExplicitDecodingConfig_AudioEncoding) EnumDescriptor() ([]byte, []int)
Deprecated: Use ExplicitDecodingConfig_AudioEncoding.Descriptor instead.
func (ExplicitDecodingConfig_AudioEncoding) Number
func (x ExplicitDecodingConfig_AudioEncoding) Number() protoreflect.EnumNumber
func (ExplicitDecodingConfig_AudioEncoding) String
func (x ExplicitDecodingConfig_AudioEncoding) String() string
func (ExplicitDecodingConfig_AudioEncoding) Type
func (ExplicitDecodingConfig_AudioEncoding) Type() protoreflect.EnumType
GetConfigRequest
type GetConfigRequest struct {
// Required. The name of the config to retrieve. There is exactly one config
// resource per project per location. The expected format is
// `projects/{project}/locations/{location}/config`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
Request message for the [GetConfig][google.cloud.speech.v2.Speech.GetConfig] method.
func (*GetConfigRequest) Descriptor
func (*GetConfigRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetConfigRequest.ProtoReflect.Descriptor instead.
func (*GetConfigRequest) GetName
func (x *GetConfigRequest) GetName() string
func (*GetConfigRequest) ProtoMessage
func (*GetConfigRequest) ProtoMessage()
func (*GetConfigRequest) ProtoReflect
func (x *GetConfigRequest) ProtoReflect() protoreflect.Message
func (*GetConfigRequest) Reset
func (x *GetConfigRequest) Reset()
func (*GetConfigRequest) String
func (x *GetConfigRequest) String() string
GetCustomClassRequest
type GetCustomClassRequest struct {
// Required. The name of the CustomClass to retrieve. The expected format is
// `projects/{project}/locations/{location}/customClasses/{custom_class}`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
Request message for the [GetCustomClass][google.cloud.speech.v2.Speech.GetCustomClass] method.
func (*GetCustomClassRequest) Descriptor
func (*GetCustomClassRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetCustomClassRequest.ProtoReflect.Descriptor instead.
func (*GetCustomClassRequest) GetName
func (x *GetCustomClassRequest) GetName() string
func (*GetCustomClassRequest) ProtoMessage
func (*GetCustomClassRequest) ProtoMessage()
func (*GetCustomClassRequest) ProtoReflect
func (x *GetCustomClassRequest) ProtoReflect() protoreflect.Message
func (*GetCustomClassRequest) Reset
func (x *GetCustomClassRequest) Reset()
func (*GetCustomClassRequest) String
func (x *GetCustomClassRequest) String() string
GetPhraseSetRequest
type GetPhraseSetRequest struct {
// Required. The name of the PhraseSet to retrieve. The expected format is
// `projects/{project}/locations/{location}/phraseSets/{phrase_set}`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
Request message for the [GetPhraseSet][google.cloud.speech.v2.Speech.GetPhraseSet] method.
func (*GetPhraseSetRequest) Descriptor
func (*GetPhraseSetRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetPhraseSetRequest.ProtoReflect.Descriptor instead.
func (*GetPhraseSetRequest) GetName
func (x *GetPhraseSetRequest) GetName() string
func (*GetPhraseSetRequest) ProtoMessage
func (*GetPhraseSetRequest) ProtoMessage()
func (*GetPhraseSetRequest) ProtoReflect
func (x *GetPhraseSetRequest) ProtoReflect() protoreflect.Message
func (*GetPhraseSetRequest) Reset
func (x *GetPhraseSetRequest) Reset()
func (*GetPhraseSetRequest) String
func (x *GetPhraseSetRequest) String() string
GetRecognizerRequest
type GetRecognizerRequest struct {
// Required. The name of the Recognizer to retrieve. The expected format is
// `projects/{project}/locations/{location}/recognizers/{recognizer}`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
Request message for the [GetRecognizer][google.cloud.speech.v2.Speech.GetRecognizer] method.
func (*GetRecognizerRequest) Descriptor
func (*GetRecognizerRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetRecognizerRequest.ProtoReflect.Descriptor instead.
func (*GetRecognizerRequest) GetName
func (x *GetRecognizerRequest) GetName() string
func (*GetRecognizerRequest) ProtoMessage
func (*GetRecognizerRequest) ProtoMessage()
func (*GetRecognizerRequest) ProtoReflect
func (x *GetRecognizerRequest) ProtoReflect() protoreflect.Message
func (*GetRecognizerRequest) Reset
func (x *GetRecognizerRequest) Reset()
func (*GetRecognizerRequest) String
func (x *GetRecognizerRequest) String() string
ListCustomClassesRequest
type ListCustomClassesRequest struct {
// Required. The project and location of CustomClass resources to list. The
// expected format is `projects/{project}/locations/{location}`.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Number of results per request. A valid page_size ranges from 0 to 20
// inclusive. If the page_size is zero or unspecified, a page size of 5 will
// be chosen. If the page size exceeds 20, it will be coerced down to 20. Note
// that a call might return fewer results than the requested page size.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// A page token, received from a previous
// [ListCustomClasses][google.cloud.speech.v2.Speech.ListCustomClasses] call.
// Provide this to retrieve the subsequent page.
//
// When paginating, all other parameters provided to
// [ListCustomClasses][google.cloud.speech.v2.Speech.ListCustomClasses] must
// match the call that provided the page token.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// Whether or not to show resources that have been deleted.
ShowDeleted bool `protobuf:"varint,4,opt,name=show_deleted,json=showDeleted,proto3" json:"show_deleted,omitempty"`
// contains filtered or unexported fields
}
Request message for the [ListCustomClasses][google.cloud.speech.v2.Speech.ListCustomClasses] method.
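A minimal sketch of the pagination contract described above, assuming the generated SpeechClient gRPC client (not shown in this section) is used to issue the calls:

package snippets

import (
	"context"

	speechpb "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
)

// listAllCustomClasses pages through every CustomClass under parent, passing
// each next_page_token back as page_token while keeping the other parameters
// identical.
func listAllCustomClasses(ctx context.Context, c speechpb.SpeechClient, parent string) ([]*speechpb.CustomClass, error) {
	var all []*speechpb.CustomClass
	req := &speechpb.ListCustomClassesRequest{Parent: parent, PageSize: 20}
	for {
		resp, err := c.ListCustomClasses(ctx, req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.GetCustomClasses()...)
		if resp.GetNextPageToken() == "" {
			return all, nil
		}
		req.PageToken = resp.GetNextPageToken()
	}
}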
func (*ListCustomClassesRequest) Descriptor
func (*ListCustomClassesRequest) Descriptor() ([]byte, []int)
Deprecated: Use ListCustomClassesRequest.ProtoReflect.Descriptor instead.
func (*ListCustomClassesRequest) GetPageSize
func (x *ListCustomClassesRequest) GetPageSize() int32
func (*ListCustomClassesRequest) GetPageToken
func (x *ListCustomClassesRequest) GetPageToken() string
func (*ListCustomClassesRequest) GetParent
func (x *ListCustomClassesRequest) GetParent() string
func (*ListCustomClassesRequest) GetShowDeleted
func (x *ListCustomClassesRequest) GetShowDeleted() bool
func (*ListCustomClassesRequest) ProtoMessage
func (*ListCustomClassesRequest) ProtoMessage()
func (*ListCustomClassesRequest) ProtoReflect
func (x *ListCustomClassesRequest) ProtoReflect() protoreflect.Message
func (*ListCustomClassesRequest) Reset
func (x *ListCustomClassesRequest) Reset()
func (*ListCustomClassesRequest) String
func (x *ListCustomClassesRequest) String() string
ListCustomClassesResponse
type ListCustomClassesResponse struct {
// The list of requested CustomClasses.
CustomClasses []*CustomClass `protobuf:"bytes,1,rep,name=custom_classes,json=customClasses,proto3" json:"custom_classes,omitempty"`
// A token, which can be sent as
// [page_token][google.cloud.speech.v2.ListCustomClassesRequest.page_token] to
// retrieve the next page. If this field is omitted, there are no subsequent
// pages. This token expires after 72 hours.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// contains filtered or unexported fields
}
Response message for the [ListCustomClasses][google.cloud.speech.v2.Speech.ListCustomClasses] method.
func (*ListCustomClassesResponse) Descriptor
func (*ListCustomClassesResponse) Descriptor() ([]byte, []int)
Deprecated: Use ListCustomClassesResponse.ProtoReflect.Descriptor instead.
func (*ListCustomClassesResponse) GetCustomClasses
func (x *ListCustomClassesResponse) GetCustomClasses() []*CustomClass
func (*ListCustomClassesResponse) GetNextPageToken
func (x *ListCustomClassesResponse) GetNextPageToken() string
func (*ListCustomClassesResponse) ProtoMessage
func (*ListCustomClassesResponse) ProtoMessage()
func (*ListCustomClassesResponse) ProtoReflect
func (x *ListCustomClassesResponse) ProtoReflect() protoreflect.Message
func (*ListCustomClassesResponse) Reset
func (x *ListCustomClassesResponse) Reset()
func (*ListCustomClassesResponse) String
func (x *ListCustomClassesResponse) String() string
ListPhraseSetsRequest
type ListPhraseSetsRequest struct {
// Required. The project and location of PhraseSet resources to list. The
// expected format is `projects/{project}/locations/{location}`.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// The maximum number of PhraseSets to return. The service may return fewer
// than this value. If unspecified, at most 20 PhraseSets will be returned.
// The maximum value is 20; values above 20 will be coerced to 20.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// A page token, received from a previous
// [ListPhraseSets][google.cloud.speech.v2.Speech.ListPhraseSets] call.
// Provide this to retrieve the subsequent page.
//
// When paginating, all other parameters provided to
// [ListPhraseSets][google.cloud.speech.v2.Speech.ListPhraseSets] must match
// the call that provided the page token.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// Whether or not to show resources that have been deleted.
ShowDeleted bool `protobuf:"varint,4,opt,name=show_deleted,json=showDeleted,proto3" json:"show_deleted,omitempty"`
// contains filtered or unexported fields
}
Request message for the [ListPhraseSets][google.cloud.speech.v2.Speech.ListPhraseSets] method.
func (*ListPhraseSetsRequest) Descriptor
func (*ListPhraseSetsRequest) Descriptor() ([]byte, []int)
Deprecated: Use ListPhraseSetsRequest.ProtoReflect.Descriptor instead.
func (*ListPhraseSetsRequest) GetPageSize
func (x *ListPhraseSetsRequest) GetPageSize() int32
func (*ListPhraseSetsRequest) GetPageToken
func (x *ListPhraseSetsRequest) GetPageToken() string
func (*ListPhraseSetsRequest) GetParent
func (x *ListPhraseSetsRequest) GetParent() string
func (*ListPhraseSetsRequest) GetShowDeleted
func (x *ListPhraseSetsRequest) GetShowDeleted() bool
func (*ListPhraseSetsRequest) ProtoMessage
func (*ListPhraseSetsRequest) ProtoMessage()
func (*ListPhraseSetsRequest) ProtoReflect
func (x *ListPhraseSetsRequest) ProtoReflect() protoreflect.Message
func (*ListPhraseSetsRequest) Reset
func (x *ListPhraseSetsRequest) Reset()
func (*ListPhraseSetsRequest) String
func (x *ListPhraseSetsRequest) String() string
ListPhraseSetsResponse
type ListPhraseSetsResponse struct {
// The list of requested PhraseSets.
PhraseSets []*PhraseSet `protobuf:"bytes,1,rep,name=phrase_sets,json=phraseSets,proto3" json:"phrase_sets,omitempty"`
// A token, which can be sent as
// [page_token][google.cloud.speech.v2.ListPhraseSetsRequest.page_token] to
// retrieve the next page. If this field is omitted, there are no subsequent
// pages. This token expires after 72 hours.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// contains filtered or unexported fields
}
Response message for the [ListPhraseSets][google.cloud.speech.v2.Speech.ListPhraseSets] method.
func (*ListPhraseSetsResponse) Descriptor
func (*ListPhraseSetsResponse) Descriptor() ([]byte, []int)
Deprecated: Use ListPhraseSetsResponse.ProtoReflect.Descriptor instead.
func (*ListPhraseSetsResponse) GetNextPageToken
func (x *ListPhraseSetsResponse) GetNextPageToken() string
func (*ListPhraseSetsResponse) GetPhraseSets
func (x *ListPhraseSetsResponse) GetPhraseSets() []*PhraseSet
func (*ListPhraseSetsResponse) ProtoMessage
func (*ListPhraseSetsResponse) ProtoMessage()
func (*ListPhraseSetsResponse) ProtoReflect
func (x *ListPhraseSetsResponse) ProtoReflect() protoreflect.Message
func (*ListPhraseSetsResponse) Reset
func (x *ListPhraseSetsResponse) Reset()
func (*ListPhraseSetsResponse) String
func (x *ListPhraseSetsResponse) String() string
ListRecognizersRequest
type ListRecognizersRequest struct {
// Required. The project and location of Recognizers to list. The expected
// format is `projects/{project}/locations/{location}`.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// The maximum number of Recognizers to return. The service may return fewer
// than this value. If unspecified, at most 20 Recognizers will be returned.
// The maximum value is 20; values above 20 will be coerced to 20.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// A page token, received from a previous
// [ListRecognizers][google.cloud.speech.v2.Speech.ListRecognizers] call.
// Provide this to retrieve the subsequent page.
//
// When paginating, all other parameters provided to
// [ListRecognizers][google.cloud.speech.v2.Speech.ListRecognizers] must match
// the call that provided the page token.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// Whether or not to show resources that have been deleted.
ShowDeleted bool `protobuf:"varint,4,opt,name=show_deleted,json=showDeleted,proto3" json:"show_deleted,omitempty"`
// contains filtered or unexported fields
}
Request message for the [ListRecognizers][google.cloud.speech.v2.Speech.ListRecognizers] method.
func (*ListRecognizersRequest) Descriptor
func (*ListRecognizersRequest) Descriptor() ([]byte, []int)
Deprecated: Use ListRecognizersRequest.ProtoReflect.Descriptor instead.
func (*ListRecognizersRequest) GetPageSize
func (x *ListRecognizersRequest) GetPageSize() int32
func (*ListRecognizersRequest) GetPageToken
func (x *ListRecognizersRequest) GetPageToken() string
func (*ListRecognizersRequest) GetParent
func (x *ListRecognizersRequest) GetParent() string
func (*ListRecognizersRequest) GetShowDeleted
func (x *ListRecognizersRequest) GetShowDeleted() bool
func (*ListRecognizersRequest) ProtoMessage
func (*ListRecognizersRequest) ProtoMessage()
func (*ListRecognizersRequest) ProtoReflect
func (x *ListRecognizersRequest) ProtoReflect() protoreflect.Message
func (*ListRecognizersRequest) Reset
func (x *ListRecognizersRequest) Reset()
func (*ListRecognizersRequest) String
func (x *ListRecognizersRequest) String() string
ListRecognizersResponse
type ListRecognizersResponse struct {
// The list of requested Recognizers.
Recognizers []*Recognizer `protobuf:"bytes,1,rep,name=recognizers,proto3" json:"recognizers,omitempty"`
// A token, which can be sent as
// [page_token][google.cloud.speech.v2.ListRecognizersRequest.page_token] to
// retrieve the next page. If this field is omitted, there are no subsequent
// pages. This token expires after 72 hours.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// contains filtered or unexported fields
}
Response message for the [ListRecognizers][google.cloud.speech.v2.Speech.ListRecognizers] method.
func (*ListRecognizersResponse) Descriptor
func (*ListRecognizersResponse) Descriptor() ([]byte, []int)
Deprecated: Use ListRecognizersResponse.ProtoReflect.Descriptor instead.
func (*ListRecognizersResponse) GetNextPageToken
func (x *ListRecognizersResponse) GetNextPageToken() string
func (*ListRecognizersResponse) GetRecognizers
func (x *ListRecognizersResponse) GetRecognizers() []*Recognizer
func (*ListRecognizersResponse) ProtoMessage
func (*ListRecognizersResponse) ProtoMessage()
func (*ListRecognizersResponse) ProtoReflect
func (x *ListRecognizersResponse) ProtoReflect() protoreflect.Message
func (*ListRecognizersResponse) Reset
func (x *ListRecognizersResponse) Reset()
func (*ListRecognizersResponse) String
func (x *ListRecognizersResponse) String() string
OperationMetadata
type OperationMetadata struct {
// The time the operation was created.
CreateTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// The time the operation was last updated.
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// The resource path for the target of the operation.
Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
// The method that triggered the operation.
Method string `protobuf:"bytes,4,opt,name=method,proto3" json:"method,omitempty"`
// The [KMS key
// name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) with which
// the content of the Operation is encrypted. The expected format is
// `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`.
KmsKeyName string `protobuf:"bytes,6,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
// The [KMS key version
// name](https://cloud.google.com/kms/docs/resource-hierarchy#key_versions)
// with which content of the Operation is encrypted. The expected format is
// `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}`.
KmsKeyVersionName string `protobuf:"bytes,7,opt,name=kms_key_version_name,json=kmsKeyVersionName,proto3" json:"kms_key_version_name,omitempty"`
// The request that spawned the Operation.
//
// Types that are assignable to Request:
//
// *OperationMetadata_BatchRecognizeRequest
// *OperationMetadata_CreateRecognizerRequest
// *OperationMetadata_UpdateRecognizerRequest
// *OperationMetadata_DeleteRecognizerRequest
// *OperationMetadata_UndeleteRecognizerRequest
// *OperationMetadata_CreateCustomClassRequest
// *OperationMetadata_UpdateCustomClassRequest
// *OperationMetadata_DeleteCustomClassRequest
// *OperationMetadata_UndeleteCustomClassRequest
// *OperationMetadata_CreatePhraseSetRequest
// *OperationMetadata_UpdatePhraseSetRequest
// *OperationMetadata_DeletePhraseSetRequest
// *OperationMetadata_UndeletePhraseSetRequest
// *OperationMetadata_UpdateConfigRequest
Request isOperationMetadata_Request `protobuf_oneof:"request"`
// The percent progress of the Operation. Values can range from 0-100. If the
// value is 100, then the operation is finished.
ProgressPercent int32 `protobuf:"varint,22,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
// Specific metadata per RPC.
//
// Types that are assignable to Metadata:
//
// *OperationMetadata_BatchRecognizeMetadata
Metadata isOperationMetadata_Metadata `protobuf_oneof:"metadata"`
// contains filtered or unexported fields
}
Represents the metadata of a long-running operation.
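A minimal sketch of inspecting which request spawned an operation via the request oneof, using the wrapper types listed above:

package snippets

import (
	"fmt"

	speechpb "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
)

// describeOperation type-switches on the request oneof and falls back to the
// method name and progress percentage.
func describeOperation(md *speechpb.OperationMetadata) {
	switch req := md.GetRequest().(type) {
	case *speechpb.OperationMetadata_BatchRecognizeRequest:
		fmt.Println("BatchRecognize of", len(req.BatchRecognizeRequest.GetFiles()), "files")
	case *speechpb.OperationMetadata_CreateRecognizerRequest:
		fmt.Println("CreateRecognizer", req.CreateRecognizerRequest.GetRecognizerId())
	default:
		fmt.Printf("%s at %d%%\n", md.GetMethod(), md.GetProgressPercent())
	}
}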
func (*OperationMetadata) Descriptor
func (*OperationMetadata) Descriptor() ([]byte, []int)
Deprecated: Use OperationMetadata.ProtoReflect.Descriptor instead.
func (*OperationMetadata) GetBatchRecognizeMetadata
func (x *OperationMetadata) GetBatchRecognizeMetadata() *BatchRecognizeMetadata
func (*OperationMetadata) GetBatchRecognizeRequest
func (x *OperationMetadata) GetBatchRecognizeRequest() *BatchRecognizeRequest
func (*OperationMetadata) GetCreateCustomClassRequest
func (x *OperationMetadata) GetCreateCustomClassRequest() *CreateCustomClassRequest
func (*OperationMetadata) GetCreatePhraseSetRequest
func (x *OperationMetadata) GetCreatePhraseSetRequest() *CreatePhraseSetRequest
func (*OperationMetadata) GetCreateRecognizerRequest
func (x *OperationMetadata) GetCreateRecognizerRequest() *CreateRecognizerRequest
func (*OperationMetadata) GetCreateTime
func (x *OperationMetadata) GetCreateTime() *timestamppb.Timestamp
func (*OperationMetadata) GetDeleteCustomClassRequest
func (x *OperationMetadata) GetDeleteCustomClassRequest() *DeleteCustomClassRequest
func (*OperationMetadata) GetDeletePhraseSetRequest
func (x *OperationMetadata) GetDeletePhraseSetRequest() *DeletePhraseSetRequest
func (*OperationMetadata) GetDeleteRecognizerRequest
func (x *OperationMetadata) GetDeleteRecognizerRequest() *DeleteRecognizerRequest
func (*OperationMetadata) GetKmsKeyName
func (x *OperationMetadata) GetKmsKeyName() string
func (*OperationMetadata) GetKmsKeyVersionName
func (x *OperationMetadata) GetKmsKeyVersionName() string
func (*OperationMetadata) GetMetadata
func (m *OperationMetadata) GetMetadata() isOperationMetadata_Metadata
func (*OperationMetadata) GetMethod
func (x *OperationMetadata) GetMethod() string
func (*OperationMetadata) GetProgressPercent
func (x *OperationMetadata) GetProgressPercent() int32
func (*OperationMetadata) GetRequest
func (m *OperationMetadata) GetRequest() isOperationMetadata_Request
func (*OperationMetadata) GetResource
func (x *OperationMetadata) GetResource() string
func (*OperationMetadata) GetUndeleteCustomClassRequest
func (x *OperationMetadata) GetUndeleteCustomClassRequest() *UndeleteCustomClassRequest
func (*OperationMetadata) GetUndeletePhraseSetRequest
func (x *OperationMetadata) GetUndeletePhraseSetRequest() *UndeletePhraseSetRequest
func (*OperationMetadata) GetUndeleteRecognizerRequest
func (x *OperationMetadata) GetUndeleteRecognizerRequest() *UndeleteRecognizerRequest
func (*OperationMetadata) GetUpdateConfigRequest
func (x *OperationMetadata) GetUpdateConfigRequest() *UpdateConfigRequest
func (*OperationMetadata) GetUpdateCustomClassRequest
func (x *OperationMetadata) GetUpdateCustomClassRequest() *UpdateCustomClassRequest
func (*OperationMetadata) GetUpdatePhraseSetRequest
func (x *OperationMetadata) GetUpdatePhraseSetRequest() *UpdatePhraseSetRequest
func (*OperationMetadata) GetUpdateRecognizerRequest
func (x *OperationMetadata) GetUpdateRecognizerRequest() *UpdateRecognizerRequest
func (*OperationMetadata) GetUpdateTime
func (x *OperationMetadata) GetUpdateTime() *timestamppb.Timestamp
func (*OperationMetadata) ProtoMessage
func (*OperationMetadata) ProtoMessage()
func (*OperationMetadata) ProtoReflect
func (x *OperationMetadata) ProtoReflect() protoreflect.Message
func (*OperationMetadata) Reset
func (x *OperationMetadata) Reset()
func (*OperationMetadata) String
func (x *OperationMetadata) String() string
OperationMetadata_BatchRecognizeMetadata
type OperationMetadata_BatchRecognizeMetadata struct {
// Metadata specific to the BatchRecognize method.
BatchRecognizeMetadata *BatchRecognizeMetadata `protobuf:"bytes,23,opt,name=batch_recognize_metadata,json=batchRecognizeMetadata,proto3,oneof"`
}
OperationMetadata_BatchRecognizeRequest
type OperationMetadata_BatchRecognizeRequest struct {
// The BatchRecognizeRequest that spawned the Operation.
BatchRecognizeRequest *BatchRecognizeRequest `protobuf:"bytes,8,opt,name=batch_recognize_request,json=batchRecognizeRequest,proto3,oneof"`
}
OperationMetadata_CreateCustomClassRequest
type OperationMetadata_CreateCustomClassRequest struct {
// The CreateCustomClassRequest that spawned the Operation.
CreateCustomClassRequest *CreateCustomClassRequest `protobuf:"bytes,13,opt,name=create_custom_class_request,json=createCustomClassRequest,proto3,oneof"`
}
OperationMetadata_CreatePhraseSetRequest
type OperationMetadata_CreatePhraseSetRequest struct {
// The CreatePhraseSetRequest that spawned the Operation.
CreatePhraseSetRequest *CreatePhraseSetRequest `protobuf:"bytes,17,opt,name=create_phrase_set_request,json=createPhraseSetRequest,proto3,oneof"`
}
OperationMetadata_CreateRecognizerRequest
type OperationMetadata_CreateRecognizerRequest struct {
// The CreateRecognizerRequest that spawned the Operation.
CreateRecognizerRequest *CreateRecognizerRequest `protobuf:"bytes,9,opt,name=create_recognizer_request,json=createRecognizerRequest,proto3,oneof"`
}
OperationMetadata_DeleteCustomClassRequest
type OperationMetadata_DeleteCustomClassRequest struct {
// The DeleteCustomClassRequest that spawned the Operation.
DeleteCustomClassRequest *DeleteCustomClassRequest `protobuf:"bytes,15,opt,name=delete_custom_class_request,json=deleteCustomClassRequest,proto3,oneof"`
}
OperationMetadata_DeletePhraseSetRequest
type OperationMetadata_DeletePhraseSetRequest struct {
// The DeletePhraseSetRequest that spawned the Operation.
DeletePhraseSetRequest *DeletePhraseSetRequest `protobuf:"bytes,19,opt,name=delete_phrase_set_request,json=deletePhraseSetRequest,proto3,oneof"`
}
OperationMetadata_DeleteRecognizerRequest
type OperationMetadata_DeleteRecognizerRequest struct {
// The DeleteRecognizerRequest that spawned the Operation.
DeleteRecognizerRequest *DeleteRecognizerRequest `protobuf:"bytes,11,opt,name=delete_recognizer_request,json=deleteRecognizerRequest,proto3,oneof"`
}
OperationMetadata_UndeleteCustomClassRequest
type OperationMetadata_UndeleteCustomClassRequest struct {
// The UndeleteCustomClassRequest that spawned the Operation.
UndeleteCustomClassRequest *UndeleteCustomClassRequest `protobuf:"bytes,16,opt,name=undelete_custom_class_request,json=undeleteCustomClassRequest,proto3,oneof"`
}
OperationMetadata_UndeletePhraseSetRequest
type OperationMetadata_UndeletePhraseSetRequest struct {
// The UndeletePhraseSetRequest that spawned the Operation.
UndeletePhraseSetRequest *UndeletePhraseSetRequest `protobuf:"bytes,20,opt,name=undelete_phrase_set_request,json=undeletePhraseSetRequest,proto3,oneof"`
}
OperationMetadata_UndeleteRecognizerRequest
type OperationMetadata_UndeleteRecognizerRequest struct {
// The UndeleteRecognizerRequest that spawned the Operation.
UndeleteRecognizerRequest *UndeleteRecognizerRequest `protobuf:"bytes,12,opt,name=undelete_recognizer_request,json=undeleteRecognizerRequest,proto3,oneof"`
}
OperationMetadata_UpdateConfigRequest
type OperationMetadata_UpdateConfigRequest struct {
// The UpdateConfigRequest that spawned the Operation.
UpdateConfigRequest *UpdateConfigRequest `protobuf:"bytes,21,opt,name=update_config_request,json=updateConfigRequest,proto3,oneof"`
}
OperationMetadata_UpdateCustomClassRequest
type OperationMetadata_UpdateCustomClassRequest struct {
// The UpdateCustomClassRequest that spawned the Operation.
UpdateCustomClassRequest *UpdateCustomClassRequest `protobuf:"bytes,14,opt,name=update_custom_class_request,json=updateCustomClassRequest,proto3,oneof"`
}
OperationMetadata_UpdatePhraseSetRequest
type OperationMetadata_UpdatePhraseSetRequest struct {
// The UpdatePhraseSetRequest that spawned the Operation.
UpdatePhraseSetRequest *UpdatePhraseSetRequest `protobuf:"bytes,18,opt,name=update_phrase_set_request,json=updatePhraseSetRequest,proto3,oneof"`
}
OperationMetadata_UpdateRecognizerRequest
type OperationMetadata_UpdateRecognizerRequest struct {
// The UpdateRecognizerRequest that spawned the Operation.
UpdateRecognizerRequest *UpdateRecognizerRequest `protobuf:"bytes,10,opt,name=update_recognizer_request,json=updateRecognizerRequest,proto3,oneof"`
}
PhraseSet
type PhraseSet struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Uid string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"`
Phrases []*PhraseSet_Phrase `protobuf:"bytes,3,rep,name=phrases,proto3" json:"phrases,omitempty"`
Boost float32 `protobuf:"fixed32,4,opt,name=boost,proto3" json:"boost,omitempty"`
DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
State PhraseSet_State `protobuf:"varint,15,opt,name=state,proto3,enum=google.cloud.speech.v2.PhraseSet_State" json:"state,omitempty"`
CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
DeleteTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"`
ExpireTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
Annotations map[string]string "" /* 164 byte string literal not displayed */
Etag string `protobuf:"bytes,11,opt,name=etag,proto3" json:"etag,omitempty"`
Reconciling bool `protobuf:"varint,12,opt,name=reconciling,proto3" json:"reconciling,omitempty"`
KmsKeyName string `protobuf:"bytes,13,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
KmsKeyVersionName string `protobuf:"bytes,14,opt,name=kms_key_version_name,json=kmsKeyVersionName,proto3" json:"kms_key_version_name,omitempty"`
}
PhraseSet for biasing in speech recognition. A PhraseSet is used to provide "hints" to the speech recognizer to favor specific words and phrases in the results.
func (*PhraseSet) Descriptor
func (*PhraseSet) Descriptor() ([]byte, []int)
Deprecated: Use PhraseSet.ProtoReflect.Descriptor instead.
func (*PhraseSet) GetAnnotations
func (x *PhraseSet) GetAnnotations() map[string]string
func (*PhraseSet) GetBoost
func (x *PhraseSet) GetBoost() float32
func (*PhraseSet) GetCreateTime
func (x *PhraseSet) GetCreateTime() *timestamppb.Timestamp
func (*PhraseSet) GetDeleteTime
func (x *PhraseSet) GetDeleteTime() *timestamppb.Timestamp
func (*PhraseSet) GetDisplayName
func (x *PhraseSet) GetDisplayName() string
func (*PhraseSet) GetEtag
func (x *PhraseSet) GetEtag() string
func (*PhraseSet) GetExpireTime
func (x *PhraseSet) GetExpireTime() *timestamppb.Timestamp
func (*PhraseSet) GetKmsKeyName
func (x *PhraseSet) GetKmsKeyName() string
func (*PhraseSet) GetKmsKeyVersionName
func (x *PhraseSet) GetKmsKeyVersionName() string
func (*PhraseSet) GetName
func (x *PhraseSet) GetName() string
func (*PhraseSet) GetPhrases
func (x *PhraseSet) GetPhrases() []*PhraseSet_Phrase
func (*PhraseSet) GetReconciling
func (x *PhraseSet) GetReconciling() bool
func (*PhraseSet) GetState
func (x *PhraseSet) GetState() PhraseSet_State
func (*PhraseSet) GetUid
func (x *PhraseSet) GetUid() string
func (*PhraseSet) GetUpdateTime
func (x *PhraseSet) GetUpdateTime() *timestamppb.Timestamp
func (*PhraseSet) ProtoMessage
func (*PhraseSet) ProtoMessage()
func (*PhraseSet) ProtoReflect
func (x *PhraseSet) ProtoReflect() protoreflect.Message
func (*PhraseSet) Reset
func (x *PhraseSet) Reset()
func (*PhraseSet) String
func (x *PhraseSet) String() string
PhraseSet_Phrase
type PhraseSet_Phrase struct {
// The phrase itself.
Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
// Hint Boost. Overrides the boost set at the phrase set level.
// A positive value increases the probability that a specific phrase is
// recognized over other similar-sounding phrases. The higher the boost,
// the higher the chance of false positive recognition as well. Negative
// boost values would correspond to anti-biasing, but anti-biasing is not
// enabled, so negative boost values are simply ignored. Though `boost` can
// accept a wide range of positive values, most use cases are best served
// with values between 0 and 20. We recommend using a binary search approach
// to finding the optimal value for your use case. Speech recognition
// will skip PhraseSets with a boost value of 0.
Boost float32 `protobuf:"fixed32,2,opt,name=boost,proto3" json:"boost,omitempty"`
// contains filtered or unexported fields
}
A Phrase contains words and phrase "hints" so that speech recognition is more likely to recognize them. This can be used to improve the accuracy for specific words and phrases, for example, if specific commands are typically spoken by the user. This can also be used to add additional words to the vocabulary of the recognizer.
List items can also include CustomClass references containing groups of words that represent common concepts that occur in natural language.
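A minimal sketch of an inline PhraseSet that biases recognition toward a few domain terms (the phrase values and boosts are illustrative; speechpb import path assumed as above):

package example

import "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path

// productTerms builds an inline phrase set favoring a few domain-specific terms.
func productTerms() *speechpb.PhraseSet {
	return &speechpb.PhraseSet{
		// Per-phrase boost overrides the phrase-set-level boost below.
		Phrases: []*speechpb.PhraseSet_Phrase{
			{Value: "Kubernetes", Boost: 10},
			{Value: "PhraseSet", Boost: 5},
		},
		Boost: 5,
	}
}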
func (*PhraseSet_Phrase) Descriptor
func (*PhraseSet_Phrase) Descriptor() ([]byte, []int)
Deprecated: Use PhraseSet_Phrase.ProtoReflect.Descriptor instead.
func (*PhraseSet_Phrase) GetBoost
func (x *PhraseSet_Phrase) GetBoost() float32
func (*PhraseSet_Phrase) GetValue
func (x *PhraseSet_Phrase) GetValue() string
func (*PhraseSet_Phrase) ProtoMessage
func (*PhraseSet_Phrase) ProtoMessage()
func (*PhraseSet_Phrase) ProtoReflect
func (x *PhraseSet_Phrase) ProtoReflect() protoreflect.Message
func (*PhraseSet_Phrase) Reset
func (x *PhraseSet_Phrase) Reset()
func (*PhraseSet_Phrase) String
func (x *PhraseSet_Phrase) String() string
PhraseSet_State
type PhraseSet_State int32
Set of states that define the lifecycle of a PhraseSet.
PhraseSet_STATE_UNSPECIFIED, PhraseSet_ACTIVE, PhraseSet_DELETED
const (
// Unspecified state. This is only used/useful for distinguishing
// unset values.
PhraseSet_STATE_UNSPECIFIED PhraseSet_State = 0
// The normal and active state.
PhraseSet_ACTIVE PhraseSet_State = 2
// This PhraseSet has been deleted.
PhraseSet_DELETED PhraseSet_State = 4
)
func (PhraseSet_State) Descriptor
func (PhraseSet_State) Descriptor() protoreflect.EnumDescriptor
func (PhraseSet_State) Enum
func (x PhraseSet_State) Enum() *PhraseSet_State
func (PhraseSet_State) EnumDescriptor
func (PhraseSet_State) EnumDescriptor() ([]byte, []int)
Deprecated: Use PhraseSet_State.Descriptor instead.
func (PhraseSet_State) Number
func (x PhraseSet_State) Number() protoreflect.EnumNumber
func (PhraseSet_State) String
func (x PhraseSet_State) String() string
func (PhraseSet_State) Type
func (PhraseSet_State) Type() protoreflect.EnumType
RecognitionConfig
type RecognitionConfig struct {
// Decoding parameters for audio being sent for recognition.
//
// Types that are assignable to DecodingConfig:
//
// *RecognitionConfig_AutoDecodingConfig
// *RecognitionConfig_ExplicitDecodingConfig
DecodingConfig isRecognitionConfig_DecodingConfig `protobuf_oneof:"decoding_config"`
// Speech recognition features to enable.
Features *RecognitionFeatures `protobuf:"bytes,2,opt,name=features,proto3" json:"features,omitempty"`
// Speech adaptation context that weights recognizer predictions for specific
// words and phrases.
Adaptation *SpeechAdaptation `protobuf:"bytes,6,opt,name=adaptation,proto3" json:"adaptation,omitempty"`
// contains filtered or unexported fields
}
Provides information to the Recognizer that specifies how to process the recognition request.
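A minimal sketch of assembling a RecognitionConfig that lets the service auto-detect the container format and enables a couple of features (speechpb import path assumed as above):

package example

import "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path

// newConfig builds a RecognitionConfig with auto-detected decoding parameters.
func newConfig() *speechpb.RecognitionConfig {
	return &speechpb.RecognitionConfig{
		// DecodingConfig is a oneof; wrap the auto-detect message explicitly.
		DecodingConfig: &speechpb.RecognitionConfig_AutoDecodingConfig{
			AutoDecodingConfig: &speechpb.AutoDetectDecodingConfig{},
		},
		Features: &speechpb.RecognitionFeatures{
			EnableAutomaticPunctuation: true,
			EnableWordTimeOffsets:      true,
		},
	}
}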
func (*RecognitionConfig) Descriptor
func (*RecognitionConfig) Descriptor() ([]byte, []int)
Deprecated: Use RecognitionConfig.ProtoReflect.Descriptor instead.
func (*RecognitionConfig) GetAdaptation
func (x *RecognitionConfig) GetAdaptation() *SpeechAdaptation
func (*RecognitionConfig) GetAutoDecodingConfig
func (x *RecognitionConfig) GetAutoDecodingConfig() *AutoDetectDecodingConfig
func (*RecognitionConfig) GetDecodingConfig
func (m *RecognitionConfig) GetDecodingConfig() isRecognitionConfig_DecodingConfig
func (*RecognitionConfig) GetExplicitDecodingConfig
func (x *RecognitionConfig) GetExplicitDecodingConfig() *ExplicitDecodingConfig
func (*RecognitionConfig) GetFeatures
func (x *RecognitionConfig) GetFeatures() *RecognitionFeatures
func (*RecognitionConfig) ProtoMessage
func (*RecognitionConfig) ProtoMessage()
func (*RecognitionConfig) ProtoReflect
func (x *RecognitionConfig) ProtoReflect() protoreflect.Message
func (*RecognitionConfig) Reset
func (x *RecognitionConfig) Reset()
func (*RecognitionConfig) String
func (x *RecognitionConfig) String() string
RecognitionConfig_AutoDecodingConfig
type RecognitionConfig_AutoDecodingConfig struct {
// Automatically detect decoding parameters.
// Preferred for supported formats.
AutoDecodingConfig *AutoDetectDecodingConfig `protobuf:"bytes,7,opt,name=auto_decoding_config,json=autoDecodingConfig,proto3,oneof"`
}
RecognitionConfig_ExplicitDecodingConfig
type RecognitionConfig_ExplicitDecodingConfig struct {
// Explicitly specified decoding parameters.
// Required if using headerless PCM audio (linear16, mulaw, alaw).
ExplicitDecodingConfig *ExplicitDecodingConfig `protobuf:"bytes,8,opt,name=explicit_decoding_config,json=explicitDecodingConfig,proto3,oneof"`
}
RecognitionFeatures
type RecognitionFeatures struct {
ProfanityFilter bool `protobuf:"varint,1,opt,name=profanity_filter,json=profanityFilter,proto3" json:"profanity_filter,omitempty"`
EnableWordTimeOffsets bool "" /* 129 byte string literal not displayed */
EnableWordConfidence bool `protobuf:"varint,3,opt,name=enable_word_confidence,json=enableWordConfidence,proto3" json:"enable_word_confidence,omitempty"`
EnableAutomaticPunctuation bool "" /* 142 byte string literal not displayed */
EnableSpokenPunctuation bool "" /* 134 byte string literal not displayed */
EnableSpokenEmojis bool `protobuf:"varint,15,opt,name=enable_spoken_emojis,json=enableSpokenEmojis,proto3" json:"enable_spoken_emojis,omitempty"`
MultiChannelMode RecognitionFeatures_MultiChannelMode "" /* 178 byte string literal not displayed */
DiarizationConfig *SpeakerDiarizationConfig `protobuf:"bytes,9,opt,name=diarization_config,json=diarizationConfig,proto3" json:"diarization_config,omitempty"`
MaxAlternatives int32 `protobuf:"varint,16,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
}
Available recognition features.
func (*RecognitionFeatures) Descriptor
func (*RecognitionFeatures) Descriptor() ([]byte, []int)
Deprecated: Use RecognitionFeatures.ProtoReflect.Descriptor instead.
func (*RecognitionFeatures) GetDiarizationConfig
func (x *RecognitionFeatures) GetDiarizationConfig() *SpeakerDiarizationConfig
func (*RecognitionFeatures) GetEnableAutomaticPunctuation
func (x *RecognitionFeatures) GetEnableAutomaticPunctuation() bool
func (*RecognitionFeatures) GetEnableSpokenEmojis
func (x *RecognitionFeatures) GetEnableSpokenEmojis() bool
func (*RecognitionFeatures) GetEnableSpokenPunctuation
func (x *RecognitionFeatures) GetEnableSpokenPunctuation() bool
func (*RecognitionFeatures) GetEnableWordConfidence
func (x *RecognitionFeatures) GetEnableWordConfidence() bool
func (*RecognitionFeatures) GetEnableWordTimeOffsets
func (x *RecognitionFeatures) GetEnableWordTimeOffsets() bool
func (*RecognitionFeatures) GetMaxAlternatives
func (x *RecognitionFeatures) GetMaxAlternatives() int32
func (*RecognitionFeatures) GetMultiChannelMode
func (x *RecognitionFeatures) GetMultiChannelMode() RecognitionFeatures_MultiChannelMode
func (*RecognitionFeatures) GetProfanityFilter
func (x *RecognitionFeatures) GetProfanityFilter() bool
func (*RecognitionFeatures) ProtoMessage
func (*RecognitionFeatures) ProtoMessage()
func (*RecognitionFeatures) ProtoReflect
func (x *RecognitionFeatures) ProtoReflect() protoreflect.Message
func (*RecognitionFeatures) Reset
func (x *RecognitionFeatures) Reset()
func (*RecognitionFeatures) String
func (x *RecognitionFeatures) String() string
RecognitionFeatures_MultiChannelMode
type RecognitionFeatures_MultiChannelMode int32
Options for how to recognize multi-channel audio.
RecognitionFeatures_MULTI_CHANNEL_MODE_UNSPECIFIED, RecognitionFeatures_SEPARATE_RECOGNITION_PER_CHANNEL
const (
// Default value for the multi-channel mode. If the audio contains
// multiple channels, only the first channel will be transcribed; other
// channels will be ignored.
RecognitionFeatures_MULTI_CHANNEL_MODE_UNSPECIFIED RecognitionFeatures_MultiChannelMode = 0
// If selected, each channel in the provided audio is transcribed
// independently. This mode cannot be used if the selected
// [model][google.cloud.speech.v2.Recognizer.model] is `latest_short`.
RecognitionFeatures_SEPARATE_RECOGNITION_PER_CHANNEL RecognitionFeatures_MultiChannelMode = 1
)
func (RecognitionFeatures_MultiChannelMode) Descriptor
func (RecognitionFeatures_MultiChannelMode) Descriptor() protoreflect.EnumDescriptor
func (RecognitionFeatures_MultiChannelMode) Enum
func (x RecognitionFeatures_MultiChannelMode) Enum() *RecognitionFeatures_MultiChannelMode
func (RecognitionFeatures_MultiChannelMode) EnumDescriptor
func (RecognitionFeatures_MultiChannelMode) EnumDescriptor() ([]byte, []int)
Deprecated: Use RecognitionFeatures_MultiChannelMode.Descriptor instead.
func (RecognitionFeatures_MultiChannelMode) Number
func (x RecognitionFeatures_MultiChannelMode) Number() protoreflect.EnumNumber
func (RecognitionFeatures_MultiChannelMode) String
func (x RecognitionFeatures_MultiChannelMode) String() string
func (RecognitionFeatures_MultiChannelMode) Type
func (RecognitionFeatures_MultiChannelMode) Type() protoreflect.EnumType
RecognitionResponseMetadata
type RecognitionResponseMetadata struct {
// When available, billed audio seconds for the corresponding request.
TotalBilledDuration *durationpb.Duration `protobuf:"bytes,6,opt,name=total_billed_duration,json=totalBilledDuration,proto3" json:"total_billed_duration,omitempty"`
// contains filtered or unexported fields
}
Metadata about the recognition request and response.
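A small sketch of reading the billed duration from the response metadata, converting the protobuf Duration to a time.Duration (speechpb import path assumed as above):

package example

import (
	"time"

	"cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
)

// billedDuration returns the billed audio duration, or 0 if it was not populated.
func billedDuration(md *speechpb.RecognitionResponseMetadata) time.Duration {
	if d := md.GetTotalBilledDuration(); d != nil {
		return d.AsDuration()
	}
	return 0
}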
func (*RecognitionResponseMetadata) Descriptor
func (*RecognitionResponseMetadata) Descriptor() ([]byte, []int)
Deprecated: Use RecognitionResponseMetadata.ProtoReflect.Descriptor instead.
func (*RecognitionResponseMetadata) GetTotalBilledDuration
func (x *RecognitionResponseMetadata) GetTotalBilledDuration() *durationpb.Duration
func (*RecognitionResponseMetadata) ProtoMessage
func (*RecognitionResponseMetadata) ProtoMessage()
func (*RecognitionResponseMetadata) ProtoReflect
func (x *RecognitionResponseMetadata) ProtoReflect() protoreflect.Message
func (*RecognitionResponseMetadata) Reset
func (x *RecognitionResponseMetadata) Reset()
func (*RecognitionResponseMetadata) String
func (x *RecognitionResponseMetadata) String() string
RecognizeRequest
type RecognizeRequest struct {
// Required. The name of the Recognizer to use during recognition. The
// expected format is
// `projects/{project}/locations/{location}/recognizers/{recognizer}`.
Recognizer string `protobuf:"bytes,3,opt,name=recognizer,proto3" json:"recognizer,omitempty"`
// Features and audio metadata to use for the Automatic Speech Recognition.
// This field in combination with the
// [config_mask][google.cloud.speech.v2.RecognizeRequest.config_mask] field
// can be used to override parts of the
// [default_recognition_config][google.cloud.speech.v2.Recognizer.default_recognition_config]
// of the Recognizer resource.
Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
// The list of fields in
// [config][google.cloud.speech.v2.RecognizeRequest.config] that override the
// values in the
// [default_recognition_config][google.cloud.speech.v2.Recognizer.default_recognition_config]
// of the recognizer during this recognition request. If no mask is provided,
// all non-default valued fields in
// [config][google.cloud.speech.v2.RecognizeRequest.config] override the
// values in the recognizer for this recognition request. If a mask is
// provided, only the fields listed in the mask override the config in the
// recognizer for this recognition request. If a wildcard (`*`) is provided,
// [config][google.cloud.speech.v2.RecognizeRequest.config] completely
// overrides and replaces the config in the recognizer for this recognition
// request.
ConfigMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=config_mask,json=configMask,proto3" json:"config_mask,omitempty"`
// The audio source, which is either inline content or a Google Cloud
// Storage URI.
//
// Types that are assignable to AudioSource:
//
// *RecognizeRequest_Content
// *RecognizeRequest_Uri
AudioSource isRecognizeRequest_AudioSource `protobuf_oneof:"audio_source"`
// contains filtered or unexported fields
}
Request message for the [Recognize][google.cloud.speech.v2.Speech.Recognize] method. Either `content` or `uri` must be supplied. Supplying both or neither returns [INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See content limits.
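A minimal sketch of a RecognizeRequest that sends inline audio and overrides only part of the recognizer's default_recognition_config via config_mask (the mask path and recognizer name are illustrative; speechpb import path assumed as above):

package example

import (
	"cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

// newRecognizeRequest sends inline audio bytes and overrides only `features`.
func newRecognizeRequest(recognizerName string, audio []byte) *speechpb.RecognizeRequest {
	return &speechpb.RecognizeRequest{
		Recognizer: recognizerName,
		Config: &speechpb.RecognitionConfig{
			Features: &speechpb.RecognitionFeatures{EnableAutomaticPunctuation: true},
		},
		// Only the fields named in the mask override the recognizer's defaults;
		// the path "features" is illustrative.
		ConfigMask: &fieldmaskpb.FieldMask{Paths: []string{"features"}},
		// AudioSource is a oneof: set exactly one of Content or Uri.
		AudioSource: &speechpb.RecognizeRequest_Content{Content: audio},
	}
}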
func (*RecognizeRequest) Descriptor
func (*RecognizeRequest) Descriptor() ([]byte, []int)
Deprecated: Use RecognizeRequest.ProtoReflect.Descriptor instead.
func (*RecognizeRequest) GetAudioSource
func (m *RecognizeRequest) GetAudioSource() isRecognizeRequest_AudioSource
func (*RecognizeRequest) GetConfig
func (x *RecognizeRequest) GetConfig() *RecognitionConfig
func (*RecognizeRequest) GetConfigMask
func (x *RecognizeRequest) GetConfigMask() *fieldmaskpb.FieldMask
func (*RecognizeRequest) GetContent
func (x *RecognizeRequest) GetContent() []byte
func (*RecognizeRequest) GetRecognizer
func (x *RecognizeRequest) GetRecognizer() string
func (*RecognizeRequest) GetUri
func (x *RecognizeRequest) GetUri() string
func (*RecognizeRequest) ProtoMessage
func (*RecognizeRequest) ProtoMessage()
func (*RecognizeRequest) ProtoReflect
func (x *RecognizeRequest) ProtoReflect() protoreflect.Message
func (*RecognizeRequest) Reset
func (x *RecognizeRequest) Reset()
func (*RecognizeRequest) String
func (x *RecognizeRequest) String() string
RecognizeRequest_Content
type RecognizeRequest_Content struct {
// The audio data bytes encoded as specified in
// [RecognitionConfig][google.cloud.speech.v2.RecognitionConfig]. As
// with all bytes fields, proto buffers use a pure binary representation,
// whereas JSON representations use base64.
Content []byte `protobuf:"bytes,5,opt,name=content,proto3,oneof"`
}
RecognizeRequest_Uri
type RecognizeRequest_Uri struct {
// URI that points to a file that contains audio data bytes as specified in
// [RecognitionConfig][google.cloud.speech.v2.RecognitionConfig]. The file
// must not be compressed (for example, gzip). Currently, only Google Cloud
// Storage URIs are supported, which must be specified in the following
// format: `gs://bucket_name/object_name` (other URI formats return
// [INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more
// information, see [Request
// URIs](https://cloud.google.com/storage/docs/reference-uris).
Uri string `protobuf:"bytes,6,opt,name=uri,proto3,oneof"`
}
RecognizeResponse
type RecognizeResponse struct {
// Sequential list of transcription results corresponding to sequential
// portions of audio.
Results []*SpeechRecognitionResult `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
// Metadata about the recognition.
Metadata *RecognitionResponseMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"`
// contains filtered or unexported fields
}
Response message for the [Recognize][google.cloud.speech.v2.Speech.Recognize] method.
func (*RecognizeResponse) Descriptor
func (*RecognizeResponse) Descriptor() ([]byte, []int)
Deprecated: Use RecognizeResponse.ProtoReflect.Descriptor instead.
func (*RecognizeResponse) GetMetadata
func (x *RecognizeResponse) GetMetadata() *RecognitionResponseMetadata
func (*RecognizeResponse) GetResults
func (x *RecognizeResponse) GetResults() []*SpeechRecognitionResult
func (*RecognizeResponse) ProtoMessage
func (*RecognizeResponse) ProtoMessage()
func (*RecognizeResponse) ProtoReflect
func (x *RecognizeResponse) ProtoReflect() protoreflect.Message
func (*RecognizeResponse) Reset
func (x *RecognizeResponse) Reset()
func (*RecognizeResponse) String
func (x *RecognizeResponse) String() string
Recognizer
type Recognizer struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Uid string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"`
DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
Model string `protobuf:"bytes,4,opt,name=model,proto3" json:"model,omitempty"`
LanguageCodes []string `protobuf:"bytes,17,rep,name=language_codes,json=languageCodes,proto3" json:"language_codes,omitempty"`
DefaultRecognitionConfig *RecognitionConfig "" /* 135 byte string literal not displayed */
Annotations map[string]string "" /* 163 byte string literal not displayed */
State Recognizer_State `protobuf:"varint,8,opt,name=state,proto3,enum=google.cloud.speech.v2.Recognizer_State" json:"state,omitempty"`
CreateTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
DeleteTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"`
ExpireTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
Etag string `protobuf:"bytes,12,opt,name=etag,proto3" json:"etag,omitempty"`
Reconciling bool `protobuf:"varint,13,opt,name=reconciling,proto3" json:"reconciling,omitempty"`
KmsKeyName string `protobuf:"bytes,15,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
KmsKeyVersionName string `protobuf:"bytes,16,opt,name=kms_key_version_name,json=kmsKeyVersionName,proto3" json:"kms_key_version_name,omitempty"`
}
A Recognizer message. Stores recognition configuration and metadata.
func (*Recognizer) Descriptor
func (*Recognizer) Descriptor() ([]byte, []int)
Deprecated: Use Recognizer.ProtoReflect.Descriptor instead.
func (*Recognizer) GetAnnotations
func (x *Recognizer) GetAnnotations() map[string]string
func (*Recognizer) GetCreateTime
func (x *Recognizer) GetCreateTime() *timestamppb.Timestamp
func (*Recognizer) GetDefaultRecognitionConfig
func (x *Recognizer) GetDefaultRecognitionConfig() *RecognitionConfig
func (*Recognizer) GetDeleteTime
func (x *Recognizer) GetDeleteTime() *timestamppb.Timestamp
func (*Recognizer) GetDisplayName
func (x *Recognizer) GetDisplayName() string
func (*Recognizer) GetEtag
func (x *Recognizer) GetEtag() string
func (*Recognizer) GetExpireTime
func (x *Recognizer) GetExpireTime() *timestamppb.Timestamp
func (*Recognizer) GetKmsKeyName
func (x *Recognizer) GetKmsKeyName() string
func (*Recognizer) GetKmsKeyVersionName
func (x *Recognizer) GetKmsKeyVersionName() string
func (*Recognizer) GetLanguageCodes
func (x *Recognizer) GetLanguageCodes() []string
func (*Recognizer) GetModel
func (x *Recognizer) GetModel() string
func (*Recognizer) GetName
func (x *Recognizer) GetName() string
func (*Recognizer) GetReconciling
func (x *Recognizer) GetReconciling() bool
func (*Recognizer) GetState
func (x *Recognizer) GetState() Recognizer_State
func (*Recognizer) GetUid
func (x *Recognizer) GetUid() string
func (*Recognizer) GetUpdateTime
func (x *Recognizer) GetUpdateTime() *timestamppb.Timestamp
func (*Recognizer) ProtoMessage
func (*Recognizer) ProtoMessage()
func (*Recognizer) ProtoReflect
func (x *Recognizer) ProtoReflect() protoreflect.Message
func (*Recognizer) Reset
func (x *Recognizer) Reset()
func (*Recognizer) String
func (x *Recognizer) String() string
Recognizer_State
type Recognizer_State int32
Set of states that define the lifecycle of a Recognizer.
Recognizer_STATE_UNSPECIFIED, Recognizer_ACTIVE, Recognizer_DELETED
const (
// The default value. This value is used if the state is omitted.
Recognizer_STATE_UNSPECIFIED Recognizer_State = 0
// The Recognizer is active and ready for use.
Recognizer_ACTIVE Recognizer_State = 2
// This Recognizer has been deleted.
Recognizer_DELETED Recognizer_State = 4
)
func (Recognizer_State) Descriptor
func (Recognizer_State) Descriptor() protoreflect.EnumDescriptor
func (Recognizer_State) Enum
func (x Recognizer_State) Enum() *Recognizer_State
func (Recognizer_State) EnumDescriptor
func (Recognizer_State) EnumDescriptor() ([]byte, []int)
Deprecated: Use Recognizer_State.Descriptor instead.
func (Recognizer_State) Number
func (x Recognizer_State) Number() protoreflect.EnumNumber
func (Recognizer_State) String
func (x Recognizer_State) String() string
func (Recognizer_State) Type
func (Recognizer_State) Type() protoreflect.EnumType
SpeakerDiarizationConfig
type SpeakerDiarizationConfig struct {
// Required. Minimum number of speakers in the conversation. This range gives
// you more flexibility by allowing the system to automatically determine the
// correct number of speakers. If not set, the default value is 2.
//
// To fix the number of speakers detected in the audio, set
// `min_speaker_count` = `max_speaker_count`.
MinSpeakerCount int32 `protobuf:"varint,2,opt,name=min_speaker_count,json=minSpeakerCount,proto3" json:"min_speaker_count,omitempty"`
// Required. Maximum number of speakers in the conversation. Valid values are:
// 1-6. Must be >= `min_speaker_count`. This range gives you more flexibility
// by allowing the system to automatically determine the correct number of
// speakers.
MaxSpeakerCount int32 `protobuf:"varint,3,opt,name=max_speaker_count,json=maxSpeakerCount,proto3" json:"max_speaker_count,omitempty"`
// contains filtered or unexported fields
}
Configuration to enable speaker diarization.
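A small sketch of enabling diarization for audio expected to contain two to four speakers; setting the minimum equal to the maximum fixes the speaker count (speechpb import path assumed as above):

package example

import "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path

// withDiarization attaches a diarization config to a set of recognition features.
func withDiarization(f *speechpb.RecognitionFeatures) {
	f.DiarizationConfig = &speechpb.SpeakerDiarizationConfig{
		MinSpeakerCount: 2,
		MaxSpeakerCount: 4,
	}
}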
func (*SpeakerDiarizationConfig) Descriptor
func (*SpeakerDiarizationConfig) Descriptor() ([]byte, []int)
Deprecated: Use SpeakerDiarizationConfig.ProtoReflect.Descriptor instead.
func (*SpeakerDiarizationConfig) GetMaxSpeakerCount
func (x *SpeakerDiarizationConfig) GetMaxSpeakerCount() int32
func (*SpeakerDiarizationConfig) GetMinSpeakerCount
func (x *SpeakerDiarizationConfig) GetMinSpeakerCount() int32
func (*SpeakerDiarizationConfig) ProtoMessage
func (*SpeakerDiarizationConfig) ProtoMessage()
func (*SpeakerDiarizationConfig) ProtoReflect
func (x *SpeakerDiarizationConfig) ProtoReflect() protoreflect.Message
func (*SpeakerDiarizationConfig) Reset
func (x *SpeakerDiarizationConfig) Reset()
func (*SpeakerDiarizationConfig) String
func (x *SpeakerDiarizationConfig) String() string
SpeechAdaptation
type SpeechAdaptation struct {
// A list of inline or referenced phrase sets.
PhraseSets []*SpeechAdaptation_AdaptationPhraseSet `protobuf:"bytes,1,rep,name=phrase_sets,json=phraseSets,proto3" json:"phrase_sets,omitempty"`
// A list of inline custom classes. Existing custom class resources can be
// referenced directly in a phrase set.
CustomClasses []*CustomClass `protobuf:"bytes,2,rep,name=custom_classes,json=customClasses,proto3" json:"custom_classes,omitempty"`
// contains filtered or unexported fields
}
Provides "hints" to the speech recognizer to favor specific words and phrases in the results. Phrase sets can be specified as an inline resource, or a reference to an existing phrase set resource.
func (*SpeechAdaptation) Descriptor
func (*SpeechAdaptation) Descriptor() ([]byte, []int)
Deprecated: Use SpeechAdaptation.ProtoReflect.Descriptor instead.
func (*SpeechAdaptation) GetCustomClasses
func (x *SpeechAdaptation) GetCustomClasses() []*CustomClass
func (*SpeechAdaptation) GetPhraseSets
func (x *SpeechAdaptation) GetPhraseSets() []*SpeechAdaptation_AdaptationPhraseSet
func (*SpeechAdaptation) ProtoMessage
func (*SpeechAdaptation) ProtoMessage()
func (*SpeechAdaptation) ProtoReflect
func (x *SpeechAdaptation) ProtoReflect() protoreflect.Message
func (*SpeechAdaptation) Reset
func (x *SpeechAdaptation) Reset()
func (*SpeechAdaptation) String
func (x *SpeechAdaptation) String() string
SpeechAdaptation_AdaptationPhraseSet
type SpeechAdaptation_AdaptationPhraseSet struct {
// Types that are assignable to Value:
//
// *SpeechAdaptation_AdaptationPhraseSet_PhraseSet
// *SpeechAdaptation_AdaptationPhraseSet_InlinePhraseSet
Value isSpeechAdaptation_AdaptationPhraseSet_Value `protobuf_oneof:"value"`
// contains filtered or unexported fields
}
A biasing phrase set, which can be either a string referencing the name of an existing phrase set resource, or an inline definition of a phrase set.
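A minimal sketch combining a reference to an existing PhraseSet resource with an inline phrase set (the resource name is a placeholder; speechpb import path assumed as above):

package example

import "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path

// newAdaptation mixes a referenced phrase set with an inline one.
func newAdaptation() *speechpb.SpeechAdaptation {
	return &speechpb.SpeechAdaptation{
		PhraseSets: []*speechpb.SpeechAdaptation_AdaptationPhraseSet{
			{
				// Placeholder resource name of an existing PhraseSet.
				Value: &speechpb.SpeechAdaptation_AdaptationPhraseSet_PhraseSet{
					PhraseSet: "projects/my-project/locations/global/phraseSets/my-phrase-set",
				},
			},
			{
				Value: &speechpb.SpeechAdaptation_AdaptationPhraseSet_InlinePhraseSet{
					InlinePhraseSet: &speechpb.PhraseSet{
						Phrases: []*speechpb.PhraseSet_Phrase{{Value: "speechpb", Boost: 10}},
					},
				},
			},
		},
	}
}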
func (*SpeechAdaptation_AdaptationPhraseSet) Descriptor
func (*SpeechAdaptation_AdaptationPhraseSet) Descriptor() ([]byte, []int)
Deprecated: Use SpeechAdaptation_AdaptationPhraseSet.ProtoReflect.Descriptor instead.
func (*SpeechAdaptation_AdaptationPhraseSet) GetInlinePhraseSet
func (x *SpeechAdaptation_AdaptationPhraseSet) GetInlinePhraseSet() *PhraseSet
func (*SpeechAdaptation_AdaptationPhraseSet) GetPhraseSet
func (x *SpeechAdaptation_AdaptationPhraseSet) GetPhraseSet() string
func (*SpeechAdaptation_AdaptationPhraseSet) GetValue
func (m *SpeechAdaptation_AdaptationPhraseSet) GetValue() isSpeechAdaptation_AdaptationPhraseSet_Value
func (*SpeechAdaptation_AdaptationPhraseSet) ProtoMessage
func (*SpeechAdaptation_AdaptationPhraseSet) ProtoMessage()
func (*SpeechAdaptation_AdaptationPhraseSet) ProtoReflect
func (x *SpeechAdaptation_AdaptationPhraseSet) ProtoReflect() protoreflect.Message
func (*SpeechAdaptation_AdaptationPhraseSet) Reset
func (x *SpeechAdaptation_AdaptationPhraseSet) Reset()
func (*SpeechAdaptation_AdaptationPhraseSet) String
func (x *SpeechAdaptation_AdaptationPhraseSet) String() string
SpeechAdaptation_AdaptationPhraseSet_InlinePhraseSet
type SpeechAdaptation_AdaptationPhraseSet_InlinePhraseSet struct {
// An inline defined phrase set.
InlinePhraseSet *PhraseSet `protobuf:"bytes,2,opt,name=inline_phrase_set,json=inlinePhraseSet,proto3,oneof"`
}
SpeechAdaptation_AdaptationPhraseSet_PhraseSet
type SpeechAdaptation_AdaptationPhraseSet_PhraseSet struct {
// The name of an existing phrase set resource. The user must have read
// access to the resource and it must not be deleted.
PhraseSet string `protobuf:"bytes,1,opt,name=phrase_set,json=phraseSet,proto3,oneof"`
}
SpeechClient
type SpeechClient interface {
// Creates a [Recognizer][google.cloud.speech.v2.Recognizer].
CreateRecognizer(ctx context.Context, in *CreateRecognizerRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Lists Recognizers.
ListRecognizers(ctx context.Context, in *ListRecognizersRequest, opts ...grpc.CallOption) (*ListRecognizersResponse, error)
// Returns the requested
// [Recognizer][google.cloud.speech.v2.Recognizer]. Fails with
// [NOT_FOUND][google.rpc.Code.NOT_FOUND] if the requested recognizer doesn't
// exist.
GetRecognizer(ctx context.Context, in *GetRecognizerRequest, opts ...grpc.CallOption) (*Recognizer, error)
// Updates the [Recognizer][google.cloud.speech.v2.Recognizer].
UpdateRecognizer(ctx context.Context, in *UpdateRecognizerRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Deletes the [Recognizer][google.cloud.speech.v2.Recognizer].
DeleteRecognizer(ctx context.Context, in *DeleteRecognizerRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Undeletes the [Recognizer][google.cloud.speech.v2.Recognizer].
UndeleteRecognizer(ctx context.Context, in *UndeleteRecognizerRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Performs synchronous Speech recognition: receive results after all audio
// has been sent and processed.
Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error)
// Performs bidirectional streaming speech recognition: receive results while
// sending audio. This method is only available via the gRPC API (not REST).
StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error)
// Performs batch asynchronous speech recognition: send a request with N
// audio files and receive a long running operation that can be polled to see
// when the transcriptions are finished.
BatchRecognize(ctx context.Context, in *BatchRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Returns the requested [Config][google.cloud.speech.v2.Config].
GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*Config, error)
// Updates the [Config][google.cloud.speech.v2.Config].
UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*Config, error)
// Creates a [CustomClass][google.cloud.speech.v2.CustomClass].
CreateCustomClass(ctx context.Context, in *CreateCustomClassRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Lists CustomClasses.
ListCustomClasses(ctx context.Context, in *ListCustomClassesRequest, opts ...grpc.CallOption) (*ListCustomClassesResponse, error)
// Returns the requested
// [CustomClass][google.cloud.speech.v2.CustomClass].
GetCustomClass(ctx context.Context, in *GetCustomClassRequest, opts ...grpc.CallOption) (*CustomClass, error)
// Updates the [CustomClass][google.cloud.speech.v2.CustomClass].
UpdateCustomClass(ctx context.Context, in *UpdateCustomClassRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Deletes the [CustomClass][google.cloud.speech.v2.CustomClass].
DeleteCustomClass(ctx context.Context, in *DeleteCustomClassRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Undeletes the [CustomClass][google.cloud.speech.v2.CustomClass].
UndeleteCustomClass(ctx context.Context, in *UndeleteCustomClassRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Creates a [PhraseSet][google.cloud.speech.v2.PhraseSet].
CreatePhraseSet(ctx context.Context, in *CreatePhraseSetRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Lists PhraseSets.
ListPhraseSets(ctx context.Context, in *ListPhraseSetsRequest, opts ...grpc.CallOption) (*ListPhraseSetsResponse, error)
// Returns the requested
// [PhraseSet][google.cloud.speech.v2.PhraseSet].
GetPhraseSet(ctx context.Context, in *GetPhraseSetRequest, opts ...grpc.CallOption) (*PhraseSet, error)
// Updates the [PhraseSet][google.cloud.speech.v2.PhraseSet].
UpdatePhraseSet(ctx context.Context, in *UpdatePhraseSetRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Deletes the [PhraseSet][google.cloud.speech.v2.PhraseSet].
DeletePhraseSet(ctx context.Context, in *DeletePhraseSetRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Undeletes the [PhraseSet][google.cloud.speech.v2.PhraseSet].
UndeletePhraseSet(ctx context.Context, in *UndeletePhraseSetRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}
SpeechClient is the client API for Speech service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewSpeechClient
func NewSpeechClient(cc grpc.ClientConnInterface) SpeechClient
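A minimal sketch of issuing a synchronous Recognize call over an already established, authenticated gRPC connection; credential and connection setup are omitted, and the speechpb import path is assumed as above:

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
	"google.golang.org/grpc"
)

// transcribe calls Recognize and prints the top transcript of each result.
func transcribe(ctx context.Context, conn grpc.ClientConnInterface, req *speechpb.RecognizeRequest) error {
	client := speechpb.NewSpeechClient(conn)
	resp, err := client.Recognize(ctx, req)
	if err != nil {
		return err
	}
	for _, result := range resp.GetResults() {
		if alts := result.GetAlternatives(); len(alts) > 0 {
			fmt.Println(alts[0].GetTranscript())
		}
	}
	return nil
}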
SpeechRecognitionAlternative
type SpeechRecognitionAlternative struct {
// Transcript text representing the words that the user spoke.
Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
// The confidence estimate between 0.0 and 1.0. A higher number
// indicates an estimated greater likelihood that the recognized words are
// correct. This field is set only for the top alternative of a non-streaming
// result, or of a streaming result where
// [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final] is
// set to `true`. This field is not guaranteed to be accurate and users should
// not rely on it always being provided. The default of 0.0 is a sentinel
// value indicating `confidence` was not set.
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// A list of word-specific information for each recognized word.
// When
// [enable_speaker_diarization][google.cloud.speech.v2.SpeakerDiarizationConfig.enable_speaker_diarization]
// is true, you will see all the words from the beginning of the audio.
Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
// contains filtered or unexported fields
}
Alternative hypotheses (a.k.a. n-best list).
func (*SpeechRecognitionAlternative) Descriptor
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int)
Deprecated: Use SpeechRecognitionAlternative.ProtoReflect.Descriptor instead.
func (*SpeechRecognitionAlternative) GetConfidence
func (x *SpeechRecognitionAlternative) GetConfidence() float32
func (*SpeechRecognitionAlternative) GetTranscript
func (x *SpeechRecognitionAlternative) GetTranscript() string
func (*SpeechRecognitionAlternative) GetWords
func (x *SpeechRecognitionAlternative) GetWords() []*WordInfo
func (*SpeechRecognitionAlternative) ProtoMessage
func (*SpeechRecognitionAlternative) ProtoMessage()
func (*SpeechRecognitionAlternative) ProtoReflect
func (x *SpeechRecognitionAlternative) ProtoReflect() protoreflect.Message
func (*SpeechRecognitionAlternative) Reset
func (x *SpeechRecognitionAlternative) Reset()
func (*SpeechRecognitionAlternative) String
func (x *SpeechRecognitionAlternative) String() string
SpeechRecognitionResult
type SpeechRecognitionResult struct {
// May contain one or more recognition hypotheses. These alternatives are
// ordered in terms of accuracy, with the top (first) alternative being the
// most probable, as ranked by the recognizer.
Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
// For multi-channel audio, this is the channel number corresponding to the
// recognized result for the audio from that channel.
// For `audio_channel_count` = `N`, its output values can range from `1` to
// `N`.
ChannelTag int32 `protobuf:"varint,2,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
// Time offset of the end of this result relative to the beginning of the
// audio.
ResultEndOffset *durationpb.Duration `protobuf:"bytes,4,opt,name=result_end_offset,json=resultEndOffset,proto3" json:"result_end_offset,omitempty"`
// Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
// language tag of the language in this result. This language code was
// detected to have the most likelihood of being spoken in the audio.
LanguageCode string `protobuf:"bytes,5,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// contains filtered or unexported fields
}
A speech recognition result corresponding to a portion of the audio.
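A small sketch of grouping the top-ranked transcript of each result by channel tag for multi-channel audio (speechpb import path assumed as above):

package example

import "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path

// transcriptsByChannel maps each channel tag (1..N) to its top transcripts.
func transcriptsByChannel(results []*speechpb.SpeechRecognitionResult) map[int32][]string {
	out := make(map[int32][]string)
	for _, r := range results {
		alts := r.GetAlternatives()
		if len(alts) == 0 {
			continue
		}
		out[r.GetChannelTag()] = append(out[r.GetChannelTag()], alts[0].GetTranscript())
	}
	return out
}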
func (*SpeechRecognitionResult) Descriptor
func (*SpeechRecognitionResult) Descriptor() ([]byte, []int)
Deprecated: Use SpeechRecognitionResult.ProtoReflect.Descriptor instead.
func (*SpeechRecognitionResult) GetAlternatives
func (x *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative
func (*SpeechRecognitionResult) GetChannelTag
func (x *SpeechRecognitionResult) GetChannelTag() int32
func (*SpeechRecognitionResult) GetLanguageCode
func (x *SpeechRecognitionResult) GetLanguageCode() string
func (*SpeechRecognitionResult) GetResultEndOffset
func (x *SpeechRecognitionResult) GetResultEndOffset() *durationpb.Duration
func (*SpeechRecognitionResult) ProtoMessage
func (*SpeechRecognitionResult) ProtoMessage()
func (*SpeechRecognitionResult) ProtoReflect
func (x *SpeechRecognitionResult) ProtoReflect() protoreflect.Message
func (*SpeechRecognitionResult) Reset
func (x *SpeechRecognitionResult) Reset()
func (*SpeechRecognitionResult) String
func (x *SpeechRecognitionResult) String() string
SpeechServer
type SpeechServer interface {
// Creates a [Recognizer][google.cloud.speech.v2.Recognizer].
CreateRecognizer(context.Context, *CreateRecognizerRequest) (*longrunning.Operation, error)
// Lists Recognizers.
ListRecognizers(context.Context, *ListRecognizersRequest) (*ListRecognizersResponse, error)
// Returns the requested
// [Recognizer][google.cloud.speech.v2.Recognizer]. Fails with
// [NOT_FOUND][google.rpc.Code.NOT_FOUND] if the requested recognizer doesn't
// exist.
GetRecognizer(context.Context, *GetRecognizerRequest) (*Recognizer, error)
// Updates the [Recognizer][google.cloud.speech.v2.Recognizer].
UpdateRecognizer(context.Context, *UpdateRecognizerRequest) (*longrunning.Operation, error)
// Deletes the [Recognizer][google.cloud.speech.v2.Recognizer].
DeleteRecognizer(context.Context, *DeleteRecognizerRequest) (*longrunning.Operation, error)
// Undeletes the [Recognizer][google.cloud.speech.v2.Recognizer].
UndeleteRecognizer(context.Context, *UndeleteRecognizerRequest) (*longrunning.Operation, error)
// Performs synchronous Speech recognition: receive results after all audio
// has been sent and processed.
Recognize(context.Context, *RecognizeRequest) (*RecognizeResponse, error)
// Performs bidirectional streaming speech recognition: receive results while
// sending audio. This method is only available via the gRPC API (not REST).
StreamingRecognize(Speech_StreamingRecognizeServer) error
// Performs batch asynchronous speech recognition: send a request with N
// audio files and receive a long running operation that can be polled to see
// when the transcriptions are finished.
BatchRecognize(context.Context, *BatchRecognizeRequest) (*longrunning.Operation, error)
// Returns the requested [Config][google.cloud.speech.v2.Config].
GetConfig(context.Context, *GetConfigRequest) (*Config, error)
// Updates the [Config][google.cloud.speech.v2.Config].
UpdateConfig(context.Context, *UpdateConfigRequest) (*Config, error)
// Creates a [CustomClass][google.cloud.speech.v2.CustomClass].
CreateCustomClass(context.Context, *CreateCustomClassRequest) (*longrunning.Operation, error)
// Lists CustomClasses.
ListCustomClasses(context.Context, *ListCustomClassesRequest) (*ListCustomClassesResponse, error)
// Returns the requested
// [CustomClass][google.cloud.speech.v2.CustomClass].
GetCustomClass(context.Context, *GetCustomClassRequest) (*CustomClass, error)
// Updates the [CustomClass][google.cloud.speech.v2.CustomClass].
UpdateCustomClass(context.Context, *UpdateCustomClassRequest) (*longrunning.Operation, error)
// Deletes the [CustomClass][google.cloud.speech.v2.CustomClass].
DeleteCustomClass(context.Context, *DeleteCustomClassRequest) (*longrunning.Operation, error)
// Undeletes the [CustomClass][google.cloud.speech.v2.CustomClass].
UndeleteCustomClass(context.Context, *UndeleteCustomClassRequest) (*longrunning.Operation, error)
// Creates a [PhraseSet][google.cloud.speech.v2.PhraseSet].
CreatePhraseSet(context.Context, *CreatePhraseSetRequest) (*longrunning.Operation, error)
// Lists PhraseSets.
ListPhraseSets(context.Context, *ListPhraseSetsRequest) (*ListPhraseSetsResponse, error)
// Returns the requested
// [PhraseSet][google.cloud.speech.v2.PhraseSet].
GetPhraseSet(context.Context, *GetPhraseSetRequest) (*PhraseSet, error)
// Updates the [PhraseSet][google.cloud.speech.v2.PhraseSet].
UpdatePhraseSet(context.Context, *UpdatePhraseSetRequest) (*longrunning.Operation, error)
// Deletes the [PhraseSet][google.cloud.speech.v2.PhraseSet].
DeletePhraseSet(context.Context, *DeletePhraseSetRequest) (*longrunning.Operation, error)
// Undeletes the [PhraseSet][google.cloud.speech.v2.PhraseSet].
UndeletePhraseSet(context.Context, *UndeletePhraseSetRequest) (*longrunning.Operation, error)
}
SpeechServer is the server API for Speech service.
Speech_StreamingRecognizeClient
type Speech_StreamingRecognizeClient interface {
Send(*StreamingRecognizeRequest) error
Recv() (*StreamingRecognizeResponse, error)
grpc.ClientStream
}
Speech_StreamingRecognizeServer
type Speech_StreamingRecognizeServer interface {
Send(*StreamingRecognizeResponse) error
Recv() (*StreamingRecognizeRequest, error)
grpc.ServerStream
}
StreamingRecognitionConfig
type StreamingRecognitionConfig struct {
// Required. Features and audio metadata to use for the Automatic Speech
// Recognition. This field in combination with the
// [config_mask][google.cloud.speech.v2.StreamingRecognitionConfig.config_mask]
// field can be used to override parts of the
// [default_recognition_config][google.cloud.speech.v2.Recognizer.default_recognition_config]
// of the Recognizer resource.
Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
// The list of fields in
// [config][google.cloud.speech.v2.StreamingRecognitionConfig.config] that
// override the values in the
// [default_recognition_config][google.cloud.speech.v2.Recognizer.default_recognition_config]
// of the recognizer during this recognition request. If no mask is provided,
// all non-default valued fields in
// [config][google.cloud.speech.v2.StreamingRecognitionConfig.config] override
// the values in the recognizer for this recognition request. If a mask is
// provided, only the fields listed in the mask override the config in the
// recognizer for this recognition request. If a wildcard (`*`) is provided,
// [config][google.cloud.speech.v2.StreamingRecognitionConfig.config]
// completely overrides and replaces the config in the recognizer for this
// recognition request.
ConfigMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=config_mask,json=configMask,proto3" json:"config_mask,omitempty"`
// Speech recognition features to enable specific to streaming audio
// recognition requests.
StreamingFeatures *StreamingRecognitionFeatures `protobuf:"bytes,2,opt,name=streaming_features,json=streamingFeatures,proto3" json:"streaming_features,omitempty"`
// contains filtered or unexported fields
}
Provides configuration information for the StreamingRecognize request.
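A minimal sketch of wrapping a base RecognitionConfig in a StreamingRecognitionConfig with interim results enabled (speechpb import path assumed as above):

package example

import "cloud.google.com/go/speech/apiv2/speechpb" // assumed import path

// newStreamingConfig enables interim results on top of a base RecognitionConfig.
func newStreamingConfig(base *speechpb.RecognitionConfig) *speechpb.StreamingRecognitionConfig {
	return &speechpb.StreamingRecognitionConfig{
		Config: base,
		StreamingFeatures: &speechpb.StreamingRecognitionFeatures{
			InterimResults: true,
		},
	}
}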
func (*StreamingRecognitionConfig) Descriptor
func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int)
Deprecated: Use StreamingRecognitionConfig.ProtoReflect.Descriptor instead.
func (*StreamingRecognitionConfig) GetConfig
func (x *StreamingRecognitionConfig) GetConfig() *RecognitionConfig
func (*StreamingRecognitionConfig) GetConfigMask
func (x *StreamingRecognitionConfig) GetConfigMask() *fieldmaskpb.FieldMask
func (*StreamingRecognitionConfig) GetStreamingFeatures
func (x *StreamingRecognitionConfig) GetStreamingFeatures() *StreamingRecognitionFeatures
func (*StreamingRecognitionConfig) ProtoMessage
func (*StreamingRecognitionConfig) ProtoMessage()
func (*StreamingRecognitionConfig) ProtoReflect
func (x *StreamingRecognitionConfig) ProtoReflect() protoreflect.Message
func (*StreamingRecognitionConfig) Reset
func (x *StreamingRecognitionConfig) Reset()
func (*StreamingRecognitionConfig) String
func (x *StreamingRecognitionConfig) String() string
StreamingRecognitionFeatures
type StreamingRecognitionFeatures struct {
EnableVoiceActivityEvents bool "" /* 141 byte string literal not displayed */
InterimResults bool `protobuf:"varint,2,opt,name=interim_results,json=interimResults,proto3" json:"interim_results,omitempty"`
VoiceActivityTimeout *StreamingRecognitionFeatures_VoiceActivityTimeout `protobuf:"bytes,3,opt,name=voice_activity_timeout,json=voiceActivityTimeout,proto3" json:"voice_activity_timeout,omitempty"`
}
Available recognition features specific to streaming recognition requests.
func (*StreamingRecognitionFeatures) Descriptor
func (*StreamingRecognitionFeatures) Descriptor() ([]byte, []int)
Deprecated: Use StreamingRecognitionFeatures.ProtoReflect.Descriptor instead.
func (*StreamingRecognitionFeatures) GetEnableVoiceActivityEvents
func (x *StreamingRecognitionFeatures) GetEnableVoiceActivityEvents() bool
func (*StreamingRecognitionFeatures) GetInterimResults
func (x *StreamingRecognitionFeatures) GetInterimResults() bool
func (*StreamingRecognitionFeatures) GetVoiceActivityTimeout
func (x *StreamingRecognitionFeatures) GetVoiceActivityTimeout() *StreamingRecognitionFeatures_VoiceActivityTimeout
func (*StreamingRecognitionFeatures) ProtoMessage
func (*StreamingRecognitionFeatures) ProtoMessage()
func (*StreamingRecognitionFeatures) ProtoReflect
func (x *StreamingRecognitionFeatures) ProtoReflect() protoreflect.Message
func (*StreamingRecognitionFeatures) Reset
func (x *StreamingRecognitionFeatures) Reset()
func (*StreamingRecognitionFeatures) String
func (x *StreamingRecognitionFeatures) String() string
StreamingRecognitionFeatures_VoiceActivityTimeout
type StreamingRecognitionFeatures_VoiceActivityTimeout struct {
// Duration to time out the stream if no speech begins. If this is set and
// no speech is detected in this duration at the start of the stream, the
// server will close the stream.
SpeechStartTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=speech_start_timeout,json=speechStartTimeout,proto3" json:"speech_start_timeout,omitempty"`
// Duration to time out the stream after speech ends. If this is set and no
// speech is detected in this duration after speech was detected, the server
// will close the stream.
SpeechEndTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=speech_end_timeout,json=speechEndTimeout,proto3" json:"speech_end_timeout,omitempty"`
// contains filtered or unexported fields
}
Events that a timeout can be set on for voice activity.
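A small sketch of configuring voice-activity timeouts (the 10-second and 2-second values are illustrative; speechpb import path assumed as above):

package example

import (
	"time"

	"cloud.google.com/go/speech/apiv2/speechpb" // assumed import path
	"google.golang.org/protobuf/types/known/durationpb"
)

// withVoiceActivityTimeouts asks the server to close the stream if speech does
// not start within 10s, or if silence follows speech for more than 2s.
func withVoiceActivityTimeouts(f *speechpb.StreamingRecognitionFeatures) {
	f.EnableVoiceActivityEvents = true
	f.VoiceActivityTimeout = &speechpb.StreamingRecognitionFeatures_VoiceActivityTimeout{
		SpeechStartTimeout: durationpb.New(10 * time.Second),
		SpeechEndTimeout:   durationpb.New(2 * time.Second),
	}
}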
func (*StreamingRecognitionFeatures_VoiceActivityTimeout) Descriptor
func (*StreamingRecognitionFeatures_VoiceActivityTimeout) Descriptor() ([]byte, []int)
Deprecated: Use StreamingRecognitionFeatures_VoiceActivityTimeout.ProtoReflect.Descriptor instead.
func (*StreamingRecognitionFeatures_VoiceActivityTimeout) GetSpeechEndTimeout
func (x *StreamingRecognitionFeatures_VoiceActivityTimeout) GetSpeechEndTimeout() *durationpb.Duration
func (*StreamingRecognitionFeatures_VoiceActivityTimeout) GetSpeechStartTimeout
func (x *StreamingRecognitionFeatures_VoiceActivityTimeout) GetSpeechStartTimeout() *durationpb.Duration
func (*StreamingRecognitionFeatures_VoiceActivityTimeout) ProtoMessage
func (*StreamingRecognitionFeatures_VoiceActivityTimeout) ProtoMessage()
func (*StreamingRecognitionFeatures_VoiceActivityTimeout) ProtoReflect
func (x *StreamingRecognitionFeatures_VoiceActivityTimeout) ProtoReflect() protoreflect.Message
func (*StreamingRecognitionFeatures_VoiceActivityTimeout) Reset
func (x *StreamingRecognitionFeatures_VoiceActivityTimeout) Reset()
func (*StreamingRecognitionFeatures_VoiceActivityTimeout) String
func (x *StreamingRecognitionFeatures_VoiceActivityTimeout) String() string
StreamingRecognitionResult
type StreamingRecognitionResult struct {
// May contain one or more recognition hypotheses. These alternatives are
// ordered in terms of accuracy, with the top (first) alternative being the
// most probable, as ranked by the recognizer.
Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
// If `false`, this
// [StreamingRecognitionResult][google.cloud.speech.v2.StreamingRecognitionResult]
// represents an interim result that may change. If `true`, this is the final
// time the speech service will return this particular
// [StreamingRecognitionResult][google.cloud.speech.v2.StreamingRecognitionResult];
// the recognizer will not return any further hypotheses for this portion of
// the transcript and corresponding audio.
IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal,proto3" json:"is_final,omitempty"`
// An estimate of the likelihood that the recognizer will not change its guess
// about this interim result. Values range from 0.0 (completely unstable)
// to 1.0 (completely stable). This field is only provided for interim results
// ([is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final]=`false`).
// The default of 0.0 is a sentinel value indicating `stability` was not set.
Stability float32 `protobuf:"fixed32,3,opt,name=stability,proto3" json:"stability,omitempty"`
// Time offset of the end of this result relative to the beginning of the
// audio.
ResultEndOffset *durationpb.Duration `protobuf:"bytes,4,opt,name=result_end_offset,json=resultEndOffset,proto3" json:"result_end_offset,omitempty"`
// For multi-channel audio, this is the channel number corresponding to the
// recognized result for the audio from that channel.
// For
// `audio_channel_count` = `N`, its output values can range from `1` to `N`.
ChannelTag int32 `protobuf:"varint,5,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
// Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
// language tag of the language in this result. This language code was
// detected to have the most likelihood of being spoken in the audio.
LanguageCode string `protobuf:"bytes,6,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// contains filtered or unexported fields
}
A streaming speech recognition result corresponding to a portion of the audio that is currently being processed.
func (*StreamingRecognitionResult) Descriptor
func (*StreamingRecognitionResult) Descriptor() ([]byte, []int)
Deprecated: Use StreamingRecognitionResult.ProtoReflect.Descriptor instead.
func (*StreamingRecognitionResult) GetAlternatives
func (x *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative
func (*StreamingRecognitionResult) GetChannelTag
func (x *StreamingRecognitionResult) GetChannelTag() int32
func (*StreamingRecognitionResult) GetIsFinal
func (x *StreamingRecognitionResult) GetIsFinal() bool
func (*StreamingRecognitionResult) GetLanguageCode
func (x *StreamingRecognitionResult) GetLanguageCode() string
func (*StreamingRecognitionResult) GetResultEndOffset
func (x *StreamingRecognitionResult) GetResultEndOffset() *durationpb.Duration
func (*StreamingRecognitionResult) GetStability
func (x *StreamingRecognitionResult) GetStability() float32
func (*StreamingRecognitionResult) ProtoMessage
func (*StreamingRecognitionResult) ProtoMessage()
func (*StreamingRecognitionResult) ProtoReflect
func (x *StreamingRecognitionResult) ProtoReflect() protoreflect.Message
func (*StreamingRecognitionResult) Reset
func (x *StreamingRecognitionResult) Reset()
func (*StreamingRecognitionResult) String
func (x *StreamingRecognitionResult) String() string
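To make the interim/final distinction concrete, here is a hedged sketch of consuming a single StreamingRecognitionResult. It assumes the SpeechRecognitionAlternative message documented elsewhere in this package exposes GetTranscript, and the 0.8 stability threshold is an arbitrary illustration, not an API requirement:

package main

import (
	"fmt"
	"strings"

	"cloud.google.com/go/speech/apiv2/speechpb" // assumed import path for this package
)

// handleResult appends final hypotheses to the running transcript and prints
// only reasonably stable interim results, following the stability guidance in
// the field comments above.
func handleResult(res *speechpb.StreamingRecognitionResult, transcript *strings.Builder) {
	alts := res.GetAlternatives()
	if len(alts) == 0 {
		return
	}
	top := alts[0] // alternatives are ordered most-probable first
	if res.GetIsFinal() {
		transcript.WriteString(top.GetTranscript())
		return
	}
	if res.GetStability() >= 0.8 { // arbitrary display threshold
		fmt.Printf("interim (%s): %s\n", res.GetLanguageCode(), top.GetTranscript())
	}
}

func main() {} // handleResult would be called for each result in a streaming receive loop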
StreamingRecognizeRequest
type StreamingRecognizeRequest struct {
// Required. Streaming recognition should start with an initial request having
// a `recognizer`. Subsequent requests carry the audio data to be recognized.
//
// The initial request with configuration can be omitted if the Recognizer
// being used has a
// [default_recognition_config][google.cloud.speech.v2.Recognizer.default_recognition_config].
Recognizer string `protobuf:"bytes,3,opt,name=recognizer,proto3" json:"recognizer,omitempty"`
// Types that are assignable to StreamingRequest:
//
// *StreamingRecognizeRequest_StreamingConfig
// *StreamingRecognizeRequest_Audio
StreamingRequest isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
// contains filtered or unexported fields
}
Request message for the [StreamingRecognize][google.cloud.speech.v2.Speech.StreamingRecognize] method. Multiple [StreamingRecognizeRequest][google.cloud.speech.v2.StreamingRecognizeRequest] messages are sent. The first message must contain a [recognizer][google.cloud.speech.v2.StreamingRecognizeRequest.recognizer] and optionally a [streaming_config][google.cloud.speech.v2.StreamingRecognizeRequest.streaming_config] message and must not contain [audio][google.cloud.speech.v2.StreamingRecognizeRequest.audio]. All subsequent messages must contain [audio][google.cloud.speech.v2.StreamingRecognizeRequest.audio] and must not contain a [streaming_config][google.cloud.speech.v2.StreamingRecognizeRequest.streaming_config] message.
func (*StreamingRecognizeRequest) Descriptor
func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int)
Deprecated: Use StreamingRecognizeRequest.ProtoReflect.Descriptor instead.
func (*StreamingRecognizeRequest) GetAudio
func (x *StreamingRecognizeRequest) GetAudio() []byte
func (*StreamingRecognizeRequest) GetRecognizer
func (x *StreamingRecognizeRequest) GetRecognizer() string
func (*StreamingRecognizeRequest) GetStreamingConfig
func (x *StreamingRecognizeRequest) GetStreamingConfig() *StreamingRecognitionConfig
func (*StreamingRecognizeRequest) GetStreamingRequest
func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest
func (*StreamingRecognizeRequest) ProtoMessage
func (*StreamingRecognizeRequest) ProtoMessage()
func (*StreamingRecognizeRequest) ProtoReflect
func (x *StreamingRecognizeRequest) ProtoReflect() protoreflect.Message
func (*StreamingRecognizeRequest) Reset
func (x *StreamingRecognizeRequest) Reset()
func (*StreamingRecognizeRequest) String
func (x *StreamingRecognizeRequest) String() string
StreamingRecognizeRequest_Audio
type StreamingRecognizeRequest_Audio struct {
// Inline audio bytes to be recognized.
Audio []byte `protobuf:"bytes,5,opt,name=audio,proto3,oneof"`
}
StreamingRecognizeRequest_StreamingConfig
type StreamingRecognizeRequest_StreamingConfig struct {
// StreamingRecognitionConfig to be used in this recognition attempt.
// If provided, it will override the default RecognitionConfig stored in the
// Recognizer.
StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,6,opt,name=streaming_config,json=streamingConfig,proto3,oneof"`
}
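A hedged sketch of the two-phase request pattern described above for StreamingRecognizeRequest: the first message names the recognizer and (optionally) carries a streaming config, and every later message carries only audio. It assumes a Speech_StreamingRecognizeClient stream and a prepared *StreamingRecognitionConfig, both defined elsewhere in this package; the recognizer name is a placeholder:

package main

import (
	"cloud.google.com/go/speech/apiv2/speechpb" // assumed import path for this package
)

// sendAll sends the configuration request followed by audio-only requests.
func sendAll(stream speechpb.Speech_StreamingRecognizeClient, streamCfg *speechpb.StreamingRecognitionConfig, chunks [][]byte) error {
	// First message: recognizer plus streaming_config, no audio.
	first := &speechpb.StreamingRecognizeRequest{
		Recognizer: "projects/my-project/locations/global/recognizers/my-recognizer", // placeholder
		StreamingRequest: &speechpb.StreamingRecognizeRequest_StreamingConfig{
			StreamingConfig: streamCfg,
		},
	}
	if err := stream.Send(first); err != nil {
		return err
	}
	// Subsequent messages: audio only, no streaming_config.
	for _, chunk := range chunks {
		req := &speechpb.StreamingRecognizeRequest{
			StreamingRequest: &speechpb.StreamingRecognizeRequest_Audio{Audio: chunk},
		}
		if err := stream.Send(req); err != nil {
			return err
		}
	}
	return stream.CloseSend()
}

func main() {}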
StreamingRecognizeResponse
type StreamingRecognizeResponse struct {
Results []*StreamingRecognitionResult `protobuf:"bytes,6,rep,name=results,proto3" json:"results,omitempty"`
SpeechEventType StreamingRecognizeResponse_SpeechEventType "" /* 180 byte string literal not displayed */
SpeechEventOffset *durationpb.Duration `protobuf:"bytes,7,opt,name=speech_event_offset,json=speechEventOffset,proto3" json:"speech_event_offset,omitempty"`
Metadata *RecognitionResponseMetadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"`
}
StreamingRecognizeResponse is the only message returned to the client by StreamingRecognize. A series of zero or more StreamingRecognizeResponse messages are streamed back to the client. If there is no recognizable audio then no messages are streamed back to the client.
Here are some examples of StreamingRecognizeResponses that might be returned while processing audio:
1. results { alternatives { transcript: "tube" } stability: 0.01 }
2. results { alternatives { transcript: "to be a" } stability: 0.01 }
3. results { alternatives { transcript: "to be" } stability: 0.9 } results { alternatives { transcript: " or not to be" } stability: 0.01 }
4. results { alternatives { transcript: "to be or not to be" confidence: 0.92 } alternatives { transcript: "to bee or not to bee" } is_final: true }
5. results { alternatives { transcript: " that's" } stability: 0.01 }
6. results { alternatives { transcript: " that is" } stability: 0.9 } results { alternatives { transcript: " the question" } stability: 0.01 }
7. results { alternatives { transcript: " that is the question" confidence: 0.98 } alternatives { transcript: " that was the question" } is_final: true }
Notes:
- Only two of the above responses (#4 and #7) contain final results; they are indicated by is_final: true. Concatenating these together generates the full transcript: "to be or not to be that is the question".
- The others contain interim results. #3 and #6 contain two interim results: the first portion has a high stability and is less likely to change; the second portion has a low stability and is very likely to change. A UI designer might choose to show only high-stability results.
- The specific stability and confidence values shown above are only for illustrative purposes. Actual values may vary.
- In each response, only one of these fields will be set: error, speech_event_type, or one or more (repeated) results.
func (*StreamingRecognizeResponse) Descriptor
func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int)
Deprecated: Use StreamingRecognizeResponse.ProtoReflect.Descriptor instead.
func (*StreamingRecognizeResponse) GetMetadata
func (x *StreamingRecognizeResponse) GetMetadata() *RecognitionResponseMetadata
func (*StreamingRecognizeResponse) GetResults
func (x *StreamingRecognizeResponse) GetResults() []*StreamingRecognitionResult
func (*StreamingRecognizeResponse) GetSpeechEventOffset
func (x *StreamingRecognizeResponse) GetSpeechEventOffset() *durationpb.Duration
func (*StreamingRecognizeResponse) GetSpeechEventType
func (x *StreamingRecognizeResponse) GetSpeechEventType() StreamingRecognizeResponse_SpeechEventType
func (*StreamingRecognizeResponse) ProtoMessage
func (*StreamingRecognizeResponse) ProtoMessage()
func (*StreamingRecognizeResponse) ProtoReflect
func (x *StreamingRecognizeResponse) ProtoReflect() protoreflect.Message
func (*StreamingRecognizeResponse) Reset
func (x *StreamingRecognizeResponse) Reset()
func (*StreamingRecognizeResponse) String
func (x *StreamingRecognizeResponse) String() string
StreamingRecognizeResponse_SpeechEventType
type StreamingRecognizeResponse_SpeechEventType int32
Indicates the type of speech event.
StreamingRecognizeResponse_SPEECH_EVENT_TYPE_UNSPECIFIED, StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE, StreamingRecognizeResponse_SPEECH_ACTIVITY_BEGIN, StreamingRecognizeResponse_SPEECH_ACTIVITY_END
const (
// No speech event specified.
StreamingRecognizeResponse_SPEECH_EVENT_TYPE_UNSPECIFIED StreamingRecognizeResponse_SpeechEventType = 0
// This event indicates that the server has detected the end of the user's
// speech utterance and expects no additional speech. Therefore, the server
// will not process additional audio and will close the gRPC bidirectional
// stream. This event is only sent if there was a force cutoff due to
// silence being detected early. This event is only available through the
// `latest_short` [model][google.cloud.speech.v2.Recognizer.model].
StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE StreamingRecognizeResponse_SpeechEventType = 1
// This event indicates that the server has detected the beginning of human
// voice activity in the stream. This event can be returned multiple times
// if speech starts and stops repeatedly throughout the stream. This event
// is only sent if `voice_activity_events` is set to true.
StreamingRecognizeResponse_SPEECH_ACTIVITY_BEGIN StreamingRecognizeResponse_SpeechEventType = 2
// This event indicates that the server has detected the end of human voice
// activity in the stream. This event can be returned multiple times if
// speech starts and stops repeatedly throughout the stream. This event is
// only sent if `voice_activity_events` is set to true.
StreamingRecognizeResponse_SPEECH_ACTIVITY_END StreamingRecognizeResponse_SpeechEventType = 3
)
func (StreamingRecognizeResponse_SpeechEventType) Descriptor
func (StreamingRecognizeResponse_SpeechEventType) Descriptor() protoreflect.EnumDescriptor
func (StreamingRecognizeResponse_SpeechEventType) Enum
func (x StreamingRecognizeResponse_SpeechEventType) Enum() *StreamingRecognizeResponse_SpeechEventType
func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor
func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int)
Deprecated: Use StreamingRecognizeResponse_SpeechEventType.Descriptor instead.
func (StreamingRecognizeResponse_SpeechEventType) Number
func (x StreamingRecognizeResponse_SpeechEventType) Number() protoreflect.EnumNumber
func (StreamingRecognizeResponse_SpeechEventType) String
func (x StreamingRecognizeResponse_SpeechEventType) String() string
func (StreamingRecognizeResponse_SpeechEventType) Type
func (StreamingRecognizeResponse_SpeechEventType) Type() protoreflect.EnumType
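A hedged sketch of a receive loop that reacts to these event types. It assumes the stream is a Speech_StreamingRecognizeClient obtained from the StreamingRecognize RPC documented elsewhere in this package; AsDuration is nil-safe, so an unset offset simply reads as 0:

package main

import (
	"errors"
	"io"
	"log"

	"cloud.google.com/go/speech/apiv2/speechpb" // assumed import path for this package
)

// receiveAll drains the stream, logging voice-activity events and results.
func receiveAll(stream speechpb.Speech_StreamingRecognizeClient) error {
	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // server closed the stream
		}
		if err != nil {
			return err
		}
		switch resp.GetSpeechEventType() {
		case speechpb.StreamingRecognizeResponse_SPEECH_ACTIVITY_BEGIN:
			log.Printf("speech began at offset %v", resp.GetSpeechEventOffset().AsDuration())
		case speechpb.StreamingRecognizeResponse_SPEECH_ACTIVITY_END:
			log.Printf("speech ended at offset %v", resp.GetSpeechEventOffset().AsDuration())
		case speechpb.StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE:
			log.Print("end of single utterance; the server will close the stream")
		}
		for _, res := range resp.GetResults() {
			log.Printf("result (is_final=%v): %d alternatives", res.GetIsFinal(), len(res.GetAlternatives()))
		}
	}
}

func main() {}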
UndeleteCustomClassRequest
type UndeleteCustomClassRequest struct {
// Required. The name of the CustomClass to undelete.
// Format:
// `projects/{project}/locations/{location}/customClasses/{custom_class}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// If set, validate the request and preview the undeleted CustomClass, but do
// not actually undelete it.
ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// This checksum is computed by the server based on the value of other
// fields. This may be sent on update, undelete, and delete requests to ensure
// the client has an up-to-date value before proceeding.
Etag string `protobuf:"bytes,4,opt,name=etag,proto3" json:"etag,omitempty"`
// contains filtered or unexported fields
}
Request message for the [UndeleteCustomClass][google.cloud.speech.v2.Speech.UndeleteCustomClass] method.
func (*UndeleteCustomClassRequest) Descriptor
func (*UndeleteCustomClassRequest) Descriptor() ([]byte, []int)
Deprecated: Use UndeleteCustomClassRequest.ProtoReflect.Descriptor instead.
func (*UndeleteCustomClassRequest) GetEtag
func (x *UndeleteCustomClassRequest) GetEtag() string
func (*UndeleteCustomClassRequest) GetName
func (x *UndeleteCustomClassRequest) GetName() string
func (*UndeleteCustomClassRequest) GetValidateOnly
func (x *UndeleteCustomClassRequest) GetValidateOnly() bool
func (*UndeleteCustomClassRequest) ProtoMessage
func (*UndeleteCustomClassRequest) ProtoMessage()
func (*UndeleteCustomClassRequest) ProtoReflect
func (x *UndeleteCustomClassRequest) ProtoReflect() protoreflect.Message
func (*UndeleteCustomClassRequest) Reset
func (x *UndeleteCustomClassRequest) Reset()
func (*UndeleteCustomClassRequest) String
func (x *UndeleteCustomClassRequest) String() string
UndeletePhraseSetRequest
type UndeletePhraseSetRequest struct {
// Required. The name of the PhraseSet to undelete.
// Format: `projects/{project}/locations/{location}/phraseSets/{phrase_set}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// If set, validate the request and preview the undeleted PhraseSet, but do
// not actually undelete it.
ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// This checksum is computed by the server based on the value of other
// fields. This may be sent on update, undelete, and delete requests to ensure
// the client has an up-to-date value before proceeding.
Etag string `protobuf:"bytes,4,opt,name=etag,proto3" json:"etag,omitempty"`
// contains filtered or unexported fields
}
Request message for the [UndeletePhraseSet][google.cloud.speech.v2.Speech.UndeletePhraseSet] method.
func (*UndeletePhraseSetRequest) Descriptor
func (*UndeletePhraseSetRequest) Descriptor() ([]byte, []int)
Deprecated: Use UndeletePhraseSetRequest.ProtoReflect.Descriptor instead.
func (*UndeletePhraseSetRequest) GetEtag
func (x *UndeletePhraseSetRequest) GetEtag() string
func (*UndeletePhraseSetRequest) GetName
func (x *UndeletePhraseSetRequest) GetName() string
func (*UndeletePhraseSetRequest) GetValidateOnly
func (x *UndeletePhraseSetRequest) GetValidateOnly() bool
func (*UndeletePhraseSetRequest) ProtoMessage
func (*UndeletePhraseSetRequest) ProtoMessage()
func (*UndeletePhraseSetRequest) ProtoReflect
func (x *UndeletePhraseSetRequest) ProtoReflect() protoreflect.Message
func (*UndeletePhraseSetRequest) Reset
func (x *UndeletePhraseSetRequest) Reset()
func (*UndeletePhraseSetRequest) String
func (x *UndeletePhraseSetRequest) String() string
UndeleteRecognizerRequest
type UndeleteRecognizerRequest struct {
// Required. The name of the Recognizer to undelete.
// Format: `projects/{project}/locations/{location}/recognizers/{recognizer}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// If set, validate the request and preview the undeleted Recognizer, but do
// not actually undelete it.
ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// This checksum is computed by the server based on the value of other
// fields. This may be sent on update, undelete, and delete requests to ensure
// the client has an up-to-date value before proceeding.
Etag string `protobuf:"bytes,4,opt,name=etag,proto3" json:"etag,omitempty"`
// contains filtered or unexported fields
}
Request message for the [UndeleteRecognizer][google.cloud.speech.v2.Speech.UndeleteRecognizer] method.
func (*UndeleteRecognizerRequest) Descriptor
func (*UndeleteRecognizerRequest) Descriptor() ([]byte, []int)
Deprecated: Use UndeleteRecognizerRequest.ProtoReflect.Descriptor instead.
func (*UndeleteRecognizerRequest) GetEtag
func (x *UndeleteRecognizerRequest) GetEtag() string
func (*UndeleteRecognizerRequest) GetName
func (x *UndeleteRecognizerRequest) GetName() string
func (*UndeleteRecognizerRequest) GetValidateOnly
func (x *UndeleteRecognizerRequest) GetValidateOnly() bool
func (*UndeleteRecognizerRequest) ProtoMessage
func (*UndeleteRecognizerRequest) ProtoMessage()
func (*UndeleteRecognizerRequest) ProtoReflect
func (x *UndeleteRecognizerRequest) ProtoReflect() protoreflect.Message
func (*UndeleteRecognizerRequest) Reset
func (x *UndeleteRecognizerRequest) Reset()
func (*UndeleteRecognizerRequest) String
func (x *UndeleteRecognizerRequest) String() string
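A hedged sketch of building an undelete request with optimistic-concurrency protection; the resource name and etag are placeholders, and the request would be passed to the UndeleteRecognizer RPC on the Speech client defined elsewhere in this package. The same Name/Etag/ValidateOnly pattern applies to the CustomClass and PhraseSet variants above:

package main

import (
	"cloud.google.com/go/speech/apiv2/speechpb" // assumed import path for this package
)

func main() {
	// Preview the undelete first (validate_only), carrying the etag previously
	// read from the resource so the server can reject stale requests.
	req := &speechpb.UndeleteRecognizerRequest{
		Name:         "projects/my-project/locations/global/recognizers/my-recognizer", // placeholder
		Etag:         "previously-read-etag",                                           // placeholder
		ValidateOnly: true,
	}
	_ = req // pass to the UndeleteRecognizer RPC; clear ValidateOnly to actually undelete
}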
UnimplementedSpeechServer
type UnimplementedSpeechServer struct {
}
UnimplementedSpeechServer can be embedded to have forward compatible implementations.
func (*UnimplementedSpeechServer) BatchRecognize
func (*UnimplementedSpeechServer) BatchRecognize(context.Context, *BatchRecognizeRequest) (*longrunning.Operation, error)
func (*UnimplementedSpeechServer) CreateCustomClass
func (*UnimplementedSpeechServer) CreateCustomClass(context.Context, *CreateCustomClassRequest) (*longrunning.Operation, error)
func (*UnimplementedSpeechServer) CreatePhraseSet
func (*UnimplementedSpeechServer) CreatePhraseSet(context.Context, *CreatePhraseSetRequest) (*longrunning.Operation, error)
func (*UnimplementedSpeechServer) CreateRecognizer
func (*UnimplementedSpeechServer) CreateRecognizer(context.Context, *CreateRecognizerRequest) (*longrunning.Operation, error)
func (*UnimplementedSpeechServer) DeleteCustomClass
func (*UnimplementedSpeechServer) DeleteCustomClass(context.Context, *DeleteCustomClassRequest) (*longrunning.Operation, error)
func (*UnimplementedSpeechServer) DeletePhraseSet
func (*UnimplementedSpeechServer) DeletePhraseSet(context.Context, *DeletePhraseSetRequest) (*longrunning.Operation, error)
func (*UnimplementedSpeechServer) DeleteRecognizer
func (*UnimplementedSpeechServer) DeleteRecognizer(context.Context, *DeleteRecognizerRequest) (*longrunning.Operation, error)
func (*UnimplementedSpeechServer) GetConfig
func (*UnimplementedSpeechServer) GetConfig(context.Context, *GetConfigRequest) (*Config, error)
func (*UnimplementedSpeechServer) GetCustomClass
func (*UnimplementedSpeechServer) GetCustomClass(context.Context, *GetCustomClassRequest) (*CustomClass, error)
func (*UnimplementedSpeechServer) GetPhraseSet
func (*UnimplementedSpeechServer) GetPhraseSet(context.Context, *GetPhraseSetRequest) (*PhraseSet, error)
func (*UnimplementedSpeechServer) GetRecognizer
func (*UnimplementedSpeechServer) GetRecognizer(context.Context, *GetRecognizerRequest) (*Recognizer, error)
func (*UnimplementedSpeechServer) ListCustomClasses
func (*UnimplementedSpeechServer) ListCustomClasses(context.Context, *ListCustomClassesRequest) (*ListCustomClassesResponse, error)
func (*UnimplementedSpeechServer) ListPhraseSets
func (*UnimplementedSpeechServer) ListPhraseSets(context.Context, *ListPhraseSetsRequest) (*ListPhraseSetsResponse, error)
func (*UnimplementedSpeechServer) ListRecognizers
func (*UnimplementedSpeechServer) ListRecognizers(context.Context, *ListRecognizersRequest) (*ListRecognizersResponse, error)
func (*UnimplementedSpeechServer) Recognize
func (*UnimplementedSpeechServer) Recognize(context.Context, *RecognizeRequest) (*RecognizeResponse, error)
func (*UnimplementedSpeechServer) StreamingRecognize
func (*UnimplementedSpeechServer) StreamingRecognize(Speech_StreamingRecognizeServer) error
func (*UnimplementedSpeechServer) UndeleteCustomClass
func (*UnimplementedSpeechServer) UndeleteCustomClass(context.Context, *UndeleteCustomClassRequest) (*longrunning.Operation, error)
func (*UnimplementedSpeechServer) UndeletePhraseSet
func (*UnimplementedSpeechServer) UndeletePhraseSet(context.Context, *UndeletePhraseSetRequest) (*longrunning.Operation, error)
func (*UnimplementedSpeechServer) UndeleteRecognizer
func (*UnimplementedSpeechServer) UndeleteRecognizer(context.Context, *UndeleteRecognizerRequest) (*longrunning.Operation, error)
func (*UnimplementedSpeechServer) UpdateConfig
func (*UnimplementedSpeechServer) UpdateConfig(context.Context, *UpdateConfigRequest) (*Config, error)
func (*UnimplementedSpeechServer) UpdateCustomClass
func (*UnimplementedSpeechServer) UpdateCustomClass(context.Context, *UpdateCustomClassRequest) (*longrunning.Operation, error)
func (*UnimplementedSpeechServer) UpdatePhraseSet
func (*UnimplementedSpeechServer) UpdatePhraseSet(context.Context, *UpdatePhraseSetRequest) (*longrunning.Operation, error)
func (*UnimplementedSpeechServer) UpdateRecognizer
func (*UnimplementedSpeechServer) UpdateRecognizer(context.Context, *UpdateRecognizerRequest) (*longrunning.Operation, error)
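A hedged sketch of the forward-compatibility pattern: embed UnimplementedSpeechServer so every RPC you do not implement returns a default error, override only the methods you need, and register the result with RegisterSpeechServer. The import path, listen address, and the GetConfig body are placeholders:

package main

import (
	"context"
	"log"
	"net"

	"cloud.google.com/go/speech/apiv2/speechpb" // assumed import path for this package
	"google.golang.org/grpc"
)

// configOnlyServer implements just GetConfig; every other Speech RPC falls
// through to the embedded UnimplementedSpeechServer.
type configOnlyServer struct {
	speechpb.UnimplementedSpeechServer
}

func (s *configOnlyServer) GetConfig(ctx context.Context, req *speechpb.GetConfigRequest) (*speechpb.Config, error) {
	return &speechpb.Config{}, nil // placeholder response
}

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	speechpb.RegisterSpeechServer(srv, &configOnlyServer{})
	log.Fatal(srv.Serve(lis))
}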
UpdateConfigRequest
type UpdateConfigRequest struct {
// Required. The config to update.
//
// The config's `name` field is used to identify the config to be updated.
// The expected format is `projects/{project}/locations/{location}/config`.
Config *Config `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
// The list of fields to be updated.
UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
// contains filtered or unexported fields
}
Request message for the [UpdateConfig][google.cloud.speech.v2.Speech.UpdateConfig] method.
func (*UpdateConfigRequest) Descriptor
func (*UpdateConfigRequest) Descriptor() ([]byte, []int)
Deprecated: Use UpdateConfigRequest.ProtoReflect.Descriptor instead.
func (*UpdateConfigRequest) GetConfig
func (x *UpdateConfigRequest) GetConfig() *Config
func (*UpdateConfigRequest) GetUpdateMask
func (x *UpdateConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask
func (*UpdateConfigRequest) ProtoMessage
func (*UpdateConfigRequest) ProtoMessage()
func (*UpdateConfigRequest) ProtoReflect
func (x *UpdateConfigRequest) ProtoReflect() protoreflect.Message
func (*UpdateConfigRequest) Reset
func (x *UpdateConfigRequest) Reset()
func (*UpdateConfigRequest) String
func (x *UpdateConfigRequest) String() string
UpdateCustomClassRequest
type UpdateCustomClassRequest struct {
// Required. The CustomClass to update.
//
// The CustomClass's `name` field is used to identify the CustomClass to
// update. Format:
// `projects/{project}/locations/{location}/customClasses/{custom_class}`.
CustomClass *CustomClass `protobuf:"bytes,1,opt,name=custom_class,json=customClass,proto3" json:"custom_class,omitempty"`
// The list of fields to be updated. If empty, all fields are considered for
// update.
UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
// If set, validate the request and preview the updated CustomClass, but do
// not actually update it.
ValidateOnly bool `protobuf:"varint,4,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// contains filtered or unexported fields
}
Request message for the [UpdateCustomClass][google.cloud.speech.v2.Speech.UpdateCustomClass] method.
func (*UpdateCustomClassRequest) Descriptor
func (*UpdateCustomClassRequest) Descriptor() ([]byte, []int)
Deprecated: Use UpdateCustomClassRequest.ProtoReflect.Descriptor instead.
func (*UpdateCustomClassRequest) GetCustomClass
func (x *UpdateCustomClassRequest) GetCustomClass() *CustomClass
func (*UpdateCustomClassRequest) GetUpdateMask
func (x *UpdateCustomClassRequest) GetUpdateMask() *fieldmaskpb.FieldMask
func (*UpdateCustomClassRequest) GetValidateOnly
func (x *UpdateCustomClassRequest) GetValidateOnly() bool
func (*UpdateCustomClassRequest) ProtoMessage
func (*UpdateCustomClassRequest) ProtoMessage()
func (*UpdateCustomClassRequest) ProtoReflect
func (x *UpdateCustomClassRequest) ProtoReflect() protoreflect.Message
func (*UpdateCustomClassRequest) Reset
func (x *UpdateCustomClassRequest) Reset()
func (*UpdateCustomClassRequest) String
func (x *UpdateCustomClassRequest) String() string
UpdatePhraseSetRequest
type UpdatePhraseSetRequest struct {
// Required. The PhraseSet to update.
//
// The PhraseSet's `name` field is used to identify the PhraseSet to update.
// Format: `projects/{project}/locations/{location}/phraseSets/{phrase_set}`.
PhraseSet *PhraseSet `protobuf:"bytes,1,opt,name=phrase_set,json=phraseSet,proto3" json:"phrase_set,omitempty"`
// The list of fields to update. If empty, all non-default valued fields are
// considered for update. Use `*` to update the entire PhraseSet resource.
UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
// If set, validate the request and preview the updated PhraseSet, but do not
// actually update it.
ValidateOnly bool `protobuf:"varint,4,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// contains filtered or unexported fields
}
Request message for the [UpdatePhraseSet][google.cloud.speech.v2.Speech.UpdatePhraseSet] method.
func (*UpdatePhraseSetRequest) Descriptor
func (*UpdatePhraseSetRequest) Descriptor() ([]byte, []int)
Deprecated: Use UpdatePhraseSetRequest.ProtoReflect.Descriptor instead.
func (*UpdatePhraseSetRequest) GetPhraseSet
func (x *UpdatePhraseSetRequest) GetPhraseSet() *PhraseSet
func (*UpdatePhraseSetRequest) GetUpdateMask
func (x *UpdatePhraseSetRequest) GetUpdateMask() *fieldmaskpb.FieldMask
func (*UpdatePhraseSetRequest) GetValidateOnly
func (x *UpdatePhraseSetRequest) GetValidateOnly() bool
func (*UpdatePhraseSetRequest) ProtoMessage
func (*UpdatePhraseSetRequest) ProtoMessage()
func (*UpdatePhraseSetRequest) ProtoReflect
func (x *UpdatePhraseSetRequest) ProtoReflect() protoreflect.Message
func (*UpdatePhraseSetRequest) Reset
func (x *UpdatePhraseSetRequest) Reset()
func (*UpdatePhraseSetRequest) String
func (x *UpdatePhraseSetRequest) String() string
UpdateRecognizerRequest
type UpdateRecognizerRequest struct {
// Required. The Recognizer to update.
//
// The Recognizer's `name` field is used to identify the Recognizer to update.
// Format: `projects/{project}/locations/{location}/recognizers/{recognizer}`.
Recognizer *Recognizer `protobuf:"bytes,1,opt,name=recognizer,proto3" json:"recognizer,omitempty"`
// The list of fields to update. If empty, all non-default valued fields are
// considered for update. Use `*` to update the entire Recognizer resource.
UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
// If set, validate the request and preview the updated Recognizer, but do not
// actually update it.
ValidateOnly bool `protobuf:"varint,4,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// contains filtered or unexported fields
}
Request message for the [UpdateRecognizer][google.cloud.speech.v2.Speech.UpdateRecognizer] method.
func (*UpdateRecognizerRequest) Descriptor
func (*UpdateRecognizerRequest) Descriptor() ([]byte, []int)
Deprecated: Use UpdateRecognizerRequest.ProtoReflect.Descriptor instead.
func (*UpdateRecognizerRequest) GetRecognizer
func (x *UpdateRecognizerRequest) GetRecognizer() *Recognizer
func (*UpdateRecognizerRequest) GetUpdateMask
func (x *UpdateRecognizerRequest) GetUpdateMask() *fieldmaskpb.FieldMask
func (*UpdateRecognizerRequest) GetValidateOnly
func (x *UpdateRecognizerRequest) GetValidateOnly() bool
func (*UpdateRecognizerRequest) ProtoMessage
func (*UpdateRecognizerRequest) ProtoMessage()
func (*UpdateRecognizerRequest) ProtoReflect
func (x *UpdateRecognizerRequest) ProtoReflect() protoreflect.Message
func (*UpdateRecognizerRequest) Reset
func (x *UpdateRecognizerRequest) Reset()
func (*UpdateRecognizerRequest) String
func (x *UpdateRecognizerRequest) String() string
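A hedged sketch of a masked update: only the paths listed in update_mask are applied, and validate_only previews the change without applying it. The display_name path and the Recognizer fields set here are illustrative assumptions about the Recognizer message documented elsewhere in this package; the same Resource/UpdateMask/ValidateOnly shape applies to the other Update* requests above:

package main

import (
	"cloud.google.com/go/speech/apiv2/speechpb" // assumed import path for this package
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	req := &speechpb.UpdateRecognizerRequest{
		Recognizer: &speechpb.Recognizer{
			Name:        "projects/my-project/locations/global/recognizers/my-recognizer", // identifies the resource
			DisplayName: "Renamed recognizer",                                             // assumed field; the value being changed
		},
		// Only display_name is touched; all other Recognizer fields are ignored.
		UpdateMask:   &fieldmaskpb.FieldMask{Paths: []string{"display_name"}},
		ValidateOnly: true, // preview without applying
	}
	_ = req // pass to the UpdateRecognizer RPC; clear ValidateOnly to apply the change
}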
WordInfo
type WordInfo struct {
// Time offset relative to the beginning of the audio,
// and corresponding to the start of the spoken word.
// This field is only set if
// [enable_word_time_offsets][google.cloud.speech.v2.RecognitionFeatures.enable_word_time_offsets]
// is `true` and only in the top hypothesis. This is an experimental feature
// and the accuracy of the time offset can vary.
StartOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=start_offset,json=startOffset,proto3" json:"start_offset,omitempty"`
// Time offset relative to the beginning of the audio,
// and corresponding to the end of the spoken word.
// This field is only set if
// [enable_word_time_offsets][google.cloud.speech.v2.RecognitionFeatures.enable_word_time_offsets]
// is `true` and only in the top hypothesis. This is an experimental feature
// and the accuracy of the time offset can vary.
EndOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=end_offset,json=endOffset,proto3" json:"end_offset,omitempty"`
// The word corresponding to this set of information.
Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
// The confidence estimate between 0.0 and 1.0. A higher number
// indicates an estimated greater likelihood that the recognized words are
// correct. This field is set only for the top alternative of a non-streaming
// result or of a streaming result where
// [is_final][google.cloud.speech.v2.StreamingRecognitionResult.is_final] is
// set to `true`. This field is not guaranteed to be accurate and users should
// not rely on it to be always provided. The default of 0.0 is a sentinel
// value indicating `confidence` was not set.
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// A distinct label is assigned for every speaker within the audio. This field
// specifies which one of those speakers was detected to have spoken this
// word. `speaker_label` is set if
// [enable_speaker_diarization][google.cloud.speech.v2.SpeakerDiarizationConfig.enable_speaker_diarization]
// is `true` and only in the top alternative.
SpeakerLabel string `protobuf:"bytes,6,opt,name=speaker_label,json=speakerLabel,proto3" json:"speaker_label,omitempty"`
// contains filtered or unexported fields
}
Word-specific information for recognized words.
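A hedged sketch of reading word-level output, assuming a []*WordInfo slice such as the Words field of SpeechRecognitionAlternative documented elsewhere in this package, with word time offsets and speaker diarization enabled in the recognition features. AsDuration is nil-safe, so unset offsets print as 0s:

package main

import (
	"fmt"

	"cloud.google.com/go/speech/apiv2/speechpb" // assumed import path for this package
)

// printWords prints each word with its time range, confidence, and speaker label.
func printWords(words []*speechpb.WordInfo) {
	for _, w := range words {
		fmt.Printf("%q [%v - %v] confidence=%.2f speaker=%q\n",
			w.GetWord(),
			w.GetStartOffset().AsDuration(),
			w.GetEndOffset().AsDuration(),
			w.GetConfidence(),
			w.GetSpeakerLabel(),
		)
	}
}

func main() {}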
func (*WordInfo) Descriptor
func (*WordInfo) Descriptor() ([]byte, []int)
Deprecated: Use WordInfo.ProtoReflect.Descriptor instead.
func (*WordInfo) GetConfidence
func (x *WordInfo) GetConfidence() float32
func (*WordInfo) GetEndOffset
func (x *WordInfo) GetEndOffset() *durationpb.Duration
func (*WordInfo) GetSpeakerLabel
func (x *WordInfo) GetSpeakerLabel() string
func (*WordInfo) GetStartOffset
func (x *WordInfo) GetStartOffset() *durationpb.Duration
func (*WordInfo) GetWord
func (x *WordInfo) GetWord() string
func (*WordInfo) ProtoMessage
func (*WordInfo) ProtoMessage()
func (*WordInfo) ProtoReflect
func (x *WordInfo) ProtoReflect() protoreflect.Message
func (*WordInfo) Reset
func (x *WordInfo) Reset()