Variables
Feature_name, Feature_value
var (
Feature_name = map[int32]string{
0: "FEATURE_UNSPECIFIED",
1: "LABEL_DETECTION",
2: "SHOT_CHANGE_DETECTION",
3: "EXPLICIT_CONTENT_DETECTION",
4: "FACE_DETECTION",
6: "SPEECH_TRANSCRIPTION",
7: "TEXT_DETECTION",
9: "OBJECT_TRACKING",
12: "LOGO_RECOGNITION",
14: "PERSON_DETECTION",
}
Feature_value = map[string]int32{
"FEATURE_UNSPECIFIED": 0,
"LABEL_DETECTION": 1,
"SHOT_CHANGE_DETECTION": 2,
"EXPLICIT_CONTENT_DETECTION": 3,
"FACE_DETECTION": 4,
"SPEECH_TRANSCRIPTION": 6,
"TEXT_DETECTION": 7,
"OBJECT_TRACKING": 9,
"LOGO_RECOGNITION": 12,
"PERSON_DETECTION": 14,
}
)
Enum value maps for Feature.
LabelDetectionMode_name, LabelDetectionMode_value
var (
LabelDetectionMode_name = map[int32]string{
0: "LABEL_DETECTION_MODE_UNSPECIFIED",
1: "SHOT_MODE",
2: "FRAME_MODE",
3: "SHOT_AND_FRAME_MODE",
}
LabelDetectionMode_value = map[string]int32{
"LABEL_DETECTION_MODE_UNSPECIFIED": 0,
"SHOT_MODE": 1,
"FRAME_MODE": 2,
"SHOT_AND_FRAME_MODE": 3,
}
)
Enum value maps for LabelDetectionMode.
Likelihood_name, Likelihood_value
var (
Likelihood_name = map[int32]string{
0: "LIKELIHOOD_UNSPECIFIED",
1: "VERY_UNLIKELY",
2: "UNLIKELY",
3: "POSSIBLE",
4: "LIKELY",
5: "VERY_LIKELY",
}
Likelihood_value = map[string]int32{
"LIKELIHOOD_UNSPECIFIED": 0,
"VERY_UNLIKELY": 1,
"UNLIKELY": 2,
"POSSIBLE": 3,
"LIKELY": 4,
"VERY_LIKELY": 5,
}
)
Enum value maps for Likelihood.
File_google_cloud_videointelligence_v1_video_intelligence_proto
var File_google_cloud_videointelligence_v1_video_intelligence_proto protoreflect.FileDescriptor
Functions
func RegisterVideoIntelligenceServiceServer
func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer)
AnnotateVideoProgress
type AnnotateVideoProgress struct {
// Progress metadata for all videos specified in `AnnotateVideoRequest`.
AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress,proto3" json:"annotation_progress,omitempty"`
// contains filtered or unexported fields
}
Video annotation progress. Included in the metadata
field of the Operation
returned by the GetOperation
call of the google::longrunning::Operations
service.
func (*AnnotateVideoProgress) Descriptor
func (*AnnotateVideoProgress) Descriptor() ([]byte, []int)
Deprecated: Use AnnotateVideoProgress.ProtoReflect.Descriptor instead.
func (*AnnotateVideoProgress) GetAnnotationProgress
func (x *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress
func (*AnnotateVideoProgress) ProtoMessage
func (*AnnotateVideoProgress) ProtoMessage()
func (*AnnotateVideoProgress) ProtoReflect
func (x *AnnotateVideoProgress) ProtoReflect() protoreflect.Message
func (*AnnotateVideoProgress) Reset
func (x *AnnotateVideoProgress) Reset()
func (*AnnotateVideoProgress) String
func (x *AnnotateVideoProgress) String() string
AnnotateVideoRequest
type AnnotateVideoRequest struct {
// Input video location. Currently, only
// [Cloud Storage](https://cloud.google.com/storage/) URIs are
// supported. URIs must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
// more information, see [Request
// URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
// multiple videos, a video URI may include wildcards in the `object-id`.
// Supported wildcards: '*' to match 0 or more characters;
// '?' to match 1 character. If unset, the input video should be embedded
// in the request as `input_content`. If set, `input_content` must be unset.
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// The video data bytes.
// If unset, the input video(s) should be specified via the `input_uri`.
// If set, `input_uri` must be unset.
InputContent []byte `protobuf:"bytes,6,opt,name=input_content,json=inputContent,proto3" json:"input_content,omitempty"`
// Required. Requested video annotation features.
Features []Feature `protobuf:"varint,2,rep,packed,name=features,proto3,enum=google.cloud.videointelligence.v1.Feature" json:"features,omitempty"`
// Additional video context and/or feature-specific parameters.
VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext,proto3" json:"video_context,omitempty"`
// Optional. Location where the output (in JSON format) should be stored.
// Currently, only [Cloud Storage](https://cloud.google.com/storage/)
// URIs are supported. These must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
// more information, see [Request
// URIs](https://cloud.google.com/storage/docs/request-endpoints).
OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
// Optional. Cloud region where annotation should take place. Supported cloud
// regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
// region is specified, the region will be determined based on video file
// location.
LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
// contains filtered or unexported fields
}
Video annotation request.
func (*AnnotateVideoRequest) Descriptor
func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)
Deprecated: Use AnnotateVideoRequest.ProtoReflect.Descriptor instead.
func (*AnnotateVideoRequest) GetFeatures
func (x *AnnotateVideoRequest) GetFeatures() []Feature
func (*AnnotateVideoRequest) GetInputContent
func (x *AnnotateVideoRequest) GetInputContent() []byte
func (*AnnotateVideoRequest) GetInputUri
func (x *AnnotateVideoRequest) GetInputUri() string
func (*AnnotateVideoRequest) GetLocationId
func (x *AnnotateVideoRequest) GetLocationId() string
func (*AnnotateVideoRequest) GetOutputUri
func (x *AnnotateVideoRequest) GetOutputUri() string
func (*AnnotateVideoRequest) GetVideoContext
func (x *AnnotateVideoRequest) GetVideoContext() *VideoContext
func (*AnnotateVideoRequest) ProtoMessage
func (*AnnotateVideoRequest) ProtoMessage()
func (*AnnotateVideoRequest) ProtoReflect
func (x *AnnotateVideoRequest) ProtoReflect() protoreflect.Message
func (*AnnotateVideoRequest) Reset
func (x *AnnotateVideoRequest) Reset()
func (*AnnotateVideoRequest) String
func (x *AnnotateVideoRequest) String() string
AnnotateVideoResponse
type AnnotateVideoResponse struct {
// Annotation results for all videos specified in `AnnotateVideoRequest`.
AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults,proto3" json:"annotation_results,omitempty"`
// contains filtered or unexported fields
}
Video annotation response. Included in the response
field of the Operation
returned by the GetOperation
call of the google::longrunning::Operations
service.
func (*AnnotateVideoResponse) Descriptor
func (*AnnotateVideoResponse) Descriptor() ([]byte, []int)
Deprecated: Use AnnotateVideoResponse.ProtoReflect.Descriptor instead.
func (*AnnotateVideoResponse) GetAnnotationResults
func (x *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults
func (*AnnotateVideoResponse) ProtoMessage
func (*AnnotateVideoResponse) ProtoMessage()
func (*AnnotateVideoResponse) ProtoReflect
func (x *AnnotateVideoResponse) ProtoReflect() protoreflect.Message
func (*AnnotateVideoResponse) Reset
func (x *AnnotateVideoResponse) Reset()
func (*AnnotateVideoResponse) String
func (x *AnnotateVideoResponse) String() string
DetectedAttribute
type DetectedAttribute struct {
// The name of the attribute, for example, glasses, dark_glasses, mouth_open.
// A full list of supported type names will be provided in the document.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Detected attribute confidence. Range [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Text value of the detection result. For example, the value for "HairColor"
// can be "black", "blonde", etc.
Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
// contains filtered or unexported fields
}
A generic detected attribute represented by name in string format.
func (*DetectedAttribute) Descriptor
func (*DetectedAttribute) Descriptor() ([]byte, []int)
Deprecated: Use DetectedAttribute.ProtoReflect.Descriptor instead.
func (*DetectedAttribute) GetConfidence
func (x *DetectedAttribute) GetConfidence() float32
func (*DetectedAttribute) GetName
func (x *DetectedAttribute) GetName() string
func (*DetectedAttribute) GetValue
func (x *DetectedAttribute) GetValue() string
func (*DetectedAttribute) ProtoMessage
func (*DetectedAttribute) ProtoMessage()
func (*DetectedAttribute) ProtoReflect
func (x *DetectedAttribute) ProtoReflect() protoreflect.Message
func (*DetectedAttribute) Reset
func (x *DetectedAttribute) Reset()
func (*DetectedAttribute) String
func (x *DetectedAttribute) String() string
DetectedLandmark
type DetectedLandmark struct {
// The name of this landmark, for example, left_hand, right_shoulder.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The 2D point of the detected landmark using the normalized image
// coordinate system. The normalized coordinates have the range from 0 to 1.
Point *NormalizedVertex `protobuf:"bytes,2,opt,name=point,proto3" json:"point,omitempty"`
// The confidence score of the detected landmark. Range [0, 1].
Confidence float32 `protobuf:"fixed32,3,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}
A generic detected landmark represented by name in string format and a 2D location.
func (*DetectedLandmark) Descriptor
func (*DetectedLandmark) Descriptor() ([]byte, []int)
Deprecated: Use DetectedLandmark.ProtoReflect.Descriptor instead.
func (*DetectedLandmark) GetConfidence
func (x *DetectedLandmark) GetConfidence() float32
func (*DetectedLandmark) GetName
func (x *DetectedLandmark) GetName() string
func (*DetectedLandmark) GetPoint
func (x *DetectedLandmark) GetPoint() *NormalizedVertex
func (*DetectedLandmark) ProtoMessage
func (*DetectedLandmark) ProtoMessage()
func (*DetectedLandmark) ProtoReflect
func (x *DetectedLandmark) ProtoReflect() protoreflect.Message
func (*DetectedLandmark) Reset
func (x *DetectedLandmark) Reset()
func (*DetectedLandmark) String
func (x *DetectedLandmark) String() string
Entity
type Entity struct {
// Opaque entity ID. Some IDs may be available in
// [Google Knowledge Graph Search
// API](https://developers.google.com/knowledge-graph/).
EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
// Textual description, e.g., `Fixed-gear bicycle`.
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
// Language code for `description` in BCP-47 format.
LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// contains filtered or unexported fields
}
Detected entity from video analysis.
func (*Entity) Descriptor
Deprecated: Use Entity.ProtoReflect.Descriptor instead.
func (*Entity) GetDescription
func (*Entity) GetEntityId
func (*Entity) GetLanguageCode
func (*Entity) ProtoMessage
func (*Entity) ProtoMessage()
func (*Entity) ProtoReflect
func (x *Entity) ProtoReflect() protoreflect.Message
func (*Entity) Reset
func (x *Entity) Reset()
func (*Entity) String
ExplicitContentAnnotation
type ExplicitContentAnnotation struct {
// All video frames where explicit content was detected.
Frames []*ExplicitContentFrame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"`
// Feature version.
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame.
func (*ExplicitContentAnnotation) Descriptor
func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use ExplicitContentAnnotation.ProtoReflect.Descriptor instead.
func (*ExplicitContentAnnotation) GetFrames
func (x *ExplicitContentAnnotation) GetFrames() []*ExplicitContentFrame
func (*ExplicitContentAnnotation) GetVersion
func (x *ExplicitContentAnnotation) GetVersion() string
func (*ExplicitContentAnnotation) ProtoMessage
func (*ExplicitContentAnnotation) ProtoMessage()
func (*ExplicitContentAnnotation) ProtoReflect
func (x *ExplicitContentAnnotation) ProtoReflect() protoreflect.Message
func (*ExplicitContentAnnotation) Reset
func (x *ExplicitContentAnnotation) Reset()
func (*ExplicitContentAnnotation) String
func (x *ExplicitContentAnnotation) String() string
ExplicitContentDetectionConfig
type ExplicitContentDetectionConfig struct {
// Model to use for explicit content detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}
Config for EXPLICIT_CONTENT_DETECTION.
func (*ExplicitContentDetectionConfig) Descriptor
func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int)
Deprecated: Use ExplicitContentDetectionConfig.ProtoReflect.Descriptor instead.
func (*ExplicitContentDetectionConfig) GetModel
func (x *ExplicitContentDetectionConfig) GetModel() string
func (*ExplicitContentDetectionConfig) ProtoMessage
func (*ExplicitContentDetectionConfig) ProtoMessage()
func (*ExplicitContentDetectionConfig) ProtoReflect
func (x *ExplicitContentDetectionConfig) ProtoReflect() protoreflect.Message
func (*ExplicitContentDetectionConfig) Reset
func (x *ExplicitContentDetectionConfig) Reset()
func (*ExplicitContentDetectionConfig) String
func (x *ExplicitContentDetectionConfig) String() string
ExplicitContentFrame
type ExplicitContentFrame struct {
TimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
PornographyLikelihood Likelihood "" /* 175 byte string literal not displayed */
}
Video frame level annotation results for explicit content.
func (*ExplicitContentFrame) Descriptor
func (*ExplicitContentFrame) Descriptor() ([]byte, []int)
Deprecated: Use ExplicitContentFrame.ProtoReflect.Descriptor instead.
func (*ExplicitContentFrame) GetPornographyLikelihood
func (x *ExplicitContentFrame) GetPornographyLikelihood() Likelihood
func (*ExplicitContentFrame) GetTimeOffset
func (x *ExplicitContentFrame) GetTimeOffset() *durationpb.Duration
func (*ExplicitContentFrame) ProtoMessage
func (*ExplicitContentFrame) ProtoMessage()
func (*ExplicitContentFrame) ProtoReflect
func (x *ExplicitContentFrame) ProtoReflect() protoreflect.Message
func (*ExplicitContentFrame) Reset
func (x *ExplicitContentFrame) Reset()
func (*ExplicitContentFrame) String
func (x *ExplicitContentFrame) String() string
FaceAnnotation (deprecated)
type FaceAnnotation struct {
// Thumbnail of a representative face view (in JPEG format).
Thumbnail []byte `protobuf:"bytes,1,opt,name=thumbnail,proto3" json:"thumbnail,omitempty"`
// All video segments where a face was detected.
Segments []*FaceSegment `protobuf:"bytes,2,rep,name=segments,proto3" json:"segments,omitempty"`
// All video frames where a face was detected.
Frames []*FaceFrame `protobuf:"bytes,3,rep,name=frames,proto3" json:"frames,omitempty"`
// contains filtered or unexported fields
}
Deprecated. No effect.
Deprecated: Do not use.
func (*FaceAnnotation) Descriptor (deprecated)
func (*FaceAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use FaceAnnotation.ProtoReflect.Descriptor instead.
func (*FaceAnnotation) GetFrames (deprecated)
func (x *FaceAnnotation) GetFrames() []*FaceFrame
func (*FaceAnnotation) GetSegments (deprecated)
func (x *FaceAnnotation) GetSegments() []*FaceSegment
func (*FaceAnnotation) GetThumbnail (deprecated)
func (x *FaceAnnotation) GetThumbnail() []byte
func (*FaceAnnotation) ProtoMessage (deprecated)
func (*FaceAnnotation) ProtoMessage()
func (*FaceAnnotation) ProtoReflect (deprecated)
func (x *FaceAnnotation) ProtoReflect() protoreflect.Message
func (*FaceAnnotation) Reset (deprecated)
func (x *FaceAnnotation) Reset()
func (*FaceAnnotation) String (deprecated)
func (x *FaceAnnotation) String() string
FaceDetectionAnnotation
type FaceDetectionAnnotation struct {
// The face tracks with attributes.
Tracks []*Track `protobuf:"bytes,3,rep,name=tracks,proto3" json:"tracks,omitempty"`
// The thumbnail of a person's face.
Thumbnail []byte `protobuf:"bytes,4,opt,name=thumbnail,proto3" json:"thumbnail,omitempty"`
// Feature version.
Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
Face detection annotation.
func (*FaceDetectionAnnotation) Descriptor
func (*FaceDetectionAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use FaceDetectionAnnotation.ProtoReflect.Descriptor instead.
func (*FaceDetectionAnnotation) GetThumbnail
func (x *FaceDetectionAnnotation) GetThumbnail() []byte
func (*FaceDetectionAnnotation) GetTracks
func (x *FaceDetectionAnnotation) GetTracks() []*Track
func (*FaceDetectionAnnotation) GetVersion
func (x *FaceDetectionAnnotation) GetVersion() string
func (*FaceDetectionAnnotation) ProtoMessage
func (*FaceDetectionAnnotation) ProtoMessage()
func (*FaceDetectionAnnotation) ProtoReflect
func (x *FaceDetectionAnnotation) ProtoReflect() protoreflect.Message
func (*FaceDetectionAnnotation) Reset
func (x *FaceDetectionAnnotation) Reset()
func (*FaceDetectionAnnotation) String
func (x *FaceDetectionAnnotation) String() string
FaceDetectionConfig
type FaceDetectionConfig struct {
// Model to use for face detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// Whether bounding boxes are included in the face annotation output.
IncludeBoundingBoxes bool `protobuf:"varint,2,opt,name=include_bounding_boxes,json=includeBoundingBoxes,proto3" json:"include_bounding_boxes,omitempty"`
// Whether to enable face attributes detection, such as glasses, dark_glasses,
// mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
IncludeAttributes bool `protobuf:"varint,5,opt,name=include_attributes,json=includeAttributes,proto3" json:"include_attributes,omitempty"`
// contains filtered or unexported fields
}
Config for FACE_DETECTION.
func (*FaceDetectionConfig) Descriptor
func (*FaceDetectionConfig) Descriptor() ([]byte, []int)
Deprecated: Use FaceDetectionConfig.ProtoReflect.Descriptor instead.
func (*FaceDetectionConfig) GetIncludeAttributes
func (x *FaceDetectionConfig) GetIncludeAttributes() bool
func (*FaceDetectionConfig) GetIncludeBoundingBoxes
func (x *FaceDetectionConfig) GetIncludeBoundingBoxes() bool
func (*FaceDetectionConfig) GetModel
func (x *FaceDetectionConfig) GetModel() string
func (*FaceDetectionConfig) ProtoMessage
func (*FaceDetectionConfig) ProtoMessage()
func (*FaceDetectionConfig) ProtoReflect
func (x *FaceDetectionConfig) ProtoReflect() protoreflect.Message
func (*FaceDetectionConfig) Reset
func (x *FaceDetectionConfig) Reset()
func (*FaceDetectionConfig) String
func (x *FaceDetectionConfig) String() string
FaceFrame (deprecated)
type FaceFrame struct {
NormalizedBoundingBoxes []*NormalizedBoundingBox "" /* 132 byte string literal not displayed */
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
}
Deprecated. No effect.
Deprecated: Do not use.
func (*FaceFrame) Descriptor (deprecated)
Deprecated: Use FaceFrame.ProtoReflect.Descriptor instead.
func (*FaceFrame) GetNormalizedBoundingBoxes (deprecated)
func (x *FaceFrame) GetNormalizedBoundingBoxes() []*NormalizedBoundingBox
func (*FaceFrame) GetTimeOffset (deprecated)
func (x *FaceFrame) GetTimeOffset() *durationpb.Duration
func (*FaceFrame) ProtoMessage (deprecated)
func (*FaceFrame) ProtoMessage()
func (*FaceFrame) ProtoReflect (deprecated)
func (x *FaceFrame) ProtoReflect() protoreflect.Message
func (*FaceFrame) Reset (deprecated)
func (x *FaceFrame) Reset()
func (*FaceFrame) String (deprecated)
FaceSegment
type FaceSegment struct {
// Video segment where a face was detected.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// contains filtered or unexported fields
}
Video segment level annotation results for face detection.
func (*FaceSegment) Descriptor
func (*FaceSegment) Descriptor() ([]byte, []int)
Deprecated: Use FaceSegment.ProtoReflect.Descriptor instead.
func (*FaceSegment) GetSegment
func (x *FaceSegment) GetSegment() *VideoSegment
func (*FaceSegment) ProtoMessage
func (*FaceSegment) ProtoMessage()
func (*FaceSegment) ProtoReflect
func (x *FaceSegment) ProtoReflect() protoreflect.Message
func (*FaceSegment) Reset
func (x *FaceSegment) Reset()
func (*FaceSegment) String
func (x *FaceSegment) String() string
Feature
type Feature int32
Video annotation feature.
Feature_FEATURE_UNSPECIFIED, Feature_LABEL_DETECTION, Feature_SHOT_CHANGE_DETECTION, Feature_EXPLICIT_CONTENT_DETECTION, Feature_FACE_DETECTION, Feature_SPEECH_TRANSCRIPTION, Feature_TEXT_DETECTION, Feature_OBJECT_TRACKING, Feature_LOGO_RECOGNITION, Feature_PERSON_DETECTION
const (
// Unspecified.
Feature_FEATURE_UNSPECIFIED Feature = 0
// Label detection. Detect objects, such as dog or flower.
Feature_LABEL_DETECTION Feature = 1
// Shot change detection.
Feature_SHOT_CHANGE_DETECTION Feature = 2
// Explicit content detection.
Feature_EXPLICIT_CONTENT_DETECTION Feature = 3
// Human face detection.
Feature_FACE_DETECTION Feature = 4
// Speech transcription.
Feature_SPEECH_TRANSCRIPTION Feature = 6
// OCR text detection and tracking.
Feature_TEXT_DETECTION Feature = 7
// Object detection and tracking.
Feature_OBJECT_TRACKING Feature = 9
// Logo detection, tracking, and recognition.
Feature_LOGO_RECOGNITION Feature = 12
// Person detection.
Feature_PERSON_DETECTION Feature = 14
)
func (Feature) Descriptor
func (Feature) Descriptor() protoreflect.EnumDescriptor
func (Feature) Enum
func (Feature) EnumDescriptor
Deprecated: Use Feature.Descriptor instead.
func (Feature) Number
func (x Feature) Number() protoreflect.EnumNumber
func (Feature) String
func (Feature) Type
func (Feature) Type() protoreflect.EnumType
LabelAnnotation
type LabelAnnotation struct {
// Detected entity.
Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
// Common categories for the detected entity.
// For example, when the label is `Terrier`, the category is likely `dog`. And
// in some cases there might be more than one category, e.g., `Terrier` could
// also be a `pet`.
CategoryEntities []*Entity `protobuf:"bytes,2,rep,name=category_entities,json=categoryEntities,proto3" json:"category_entities,omitempty"`
// All video segments where a label was detected.
Segments []*LabelSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
// All video frames where a label was detected.
Frames []*LabelFrame `protobuf:"bytes,4,rep,name=frames,proto3" json:"frames,omitempty"`
// Feature version.
Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
Label annotation.
func (*LabelAnnotation) Descriptor
func (*LabelAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use LabelAnnotation.ProtoReflect.Descriptor instead.
func (*LabelAnnotation) GetCategoryEntities
func (x *LabelAnnotation) GetCategoryEntities() []*Entity
func (*LabelAnnotation) GetEntity
func (x *LabelAnnotation) GetEntity() *Entity
func (*LabelAnnotation) GetFrames
func (x *LabelAnnotation) GetFrames() []*LabelFrame
func (*LabelAnnotation) GetSegments
func (x *LabelAnnotation) GetSegments() []*LabelSegment
func (*LabelAnnotation) GetVersion
func (x *LabelAnnotation) GetVersion() string
func (*LabelAnnotation) ProtoMessage
func (*LabelAnnotation) ProtoMessage()
func (*LabelAnnotation) ProtoReflect
func (x *LabelAnnotation) ProtoReflect() protoreflect.Message
func (*LabelAnnotation) Reset
func (x *LabelAnnotation) Reset()
func (*LabelAnnotation) String
func (x *LabelAnnotation) String() string
LabelDetectionConfig
type LabelDetectionConfig struct {
LabelDetectionMode LabelDetectionMode "" /* 176 byte string literal not displayed */
StationaryCamera bool `protobuf:"varint,2,opt,name=stationary_camera,json=stationaryCamera,proto3" json:"stationary_camera,omitempty"`
Model string `protobuf:"bytes,3,opt,name=model,proto3" json:"model,omitempty"`
FrameConfidenceThreshold float32 "" /* 137 byte string literal not displayed */
VideoConfidenceThreshold float32 "" /* 137 byte string literal not displayed */
}
Config for LABEL_DETECTION.
func (*LabelDetectionConfig) Descriptor
func (*LabelDetectionConfig) Descriptor() ([]byte, []int)
Deprecated: Use LabelDetectionConfig.ProtoReflect.Descriptor instead.
func (*LabelDetectionConfig) GetFrameConfidenceThreshold
func (x *LabelDetectionConfig) GetFrameConfidenceThreshold() float32
func (*LabelDetectionConfig) GetLabelDetectionMode
func (x *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionMode
func (*LabelDetectionConfig) GetModel
func (x *LabelDetectionConfig) GetModel() string
func (*LabelDetectionConfig) GetStationaryCamera
func (x *LabelDetectionConfig) GetStationaryCamera() bool
func (*LabelDetectionConfig) GetVideoConfidenceThreshold
func (x *LabelDetectionConfig) GetVideoConfidenceThreshold() float32
func (*LabelDetectionConfig) ProtoMessage
func (*LabelDetectionConfig) ProtoMessage()
func (*LabelDetectionConfig) ProtoReflect
func (x *LabelDetectionConfig) ProtoReflect() protoreflect.Message
func (*LabelDetectionConfig) Reset
func (x *LabelDetectionConfig) Reset()
func (*LabelDetectionConfig) String
func (x *LabelDetectionConfig) String() string
LabelDetectionMode
type LabelDetectionMode int32
Label detection mode.
LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED, LabelDetectionMode_SHOT_MODE, LabelDetectionMode_FRAME_MODE, LabelDetectionMode_SHOT_AND_FRAME_MODE
const (
// Unspecified.
LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0
// Detect shot-level labels.
LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1
// Detect frame-level labels.
LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2
// Detect both shot-level and frame-level labels.
LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3
)
func (LabelDetectionMode) Descriptor
func (LabelDetectionMode) Descriptor() protoreflect.EnumDescriptor
func (LabelDetectionMode) Enum
func (x LabelDetectionMode) Enum() *LabelDetectionMode
func (LabelDetectionMode) EnumDescriptor
func (LabelDetectionMode) EnumDescriptor() ([]byte, []int)
Deprecated: Use LabelDetectionMode.Descriptor instead.
func (LabelDetectionMode) Number
func (x LabelDetectionMode) Number() protoreflect.EnumNumber
func (LabelDetectionMode) String
func (x LabelDetectionMode) String() string
func (LabelDetectionMode) Type
func (LabelDetectionMode) Type() protoreflect.EnumType
LabelFrame
type LabelFrame struct {
// Time-offset, relative to the beginning of the video, corresponding to the
// video frame for this location.
TimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// Confidence that the label is accurate. Range: [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}
Video frame level annotation results for label detection.
func (*LabelFrame) Descriptor
func (*LabelFrame) Descriptor() ([]byte, []int)
Deprecated: Use LabelFrame.ProtoReflect.Descriptor instead.
func (*LabelFrame) GetConfidence
func (x *LabelFrame) GetConfidence() float32
func (*LabelFrame) GetTimeOffset
func (x *LabelFrame) GetTimeOffset() *durationpb.Duration
func (*LabelFrame) ProtoMessage
func (*LabelFrame) ProtoMessage()
func (*LabelFrame) ProtoReflect
func (x *LabelFrame) ProtoReflect() protoreflect.Message
func (*LabelFrame) Reset
func (x *LabelFrame) Reset()
func (*LabelFrame) String
func (x *LabelFrame) String() string
LabelSegment
type LabelSegment struct {
// Video segment where a label was detected.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// Confidence that the label is accurate. Range: [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}
Video segment level annotation results for label detection.
func (*LabelSegment) Descriptor
func (*LabelSegment) Descriptor() ([]byte, []int)
Deprecated: Use LabelSegment.ProtoReflect.Descriptor instead.
func (*LabelSegment) GetConfidence
func (x *LabelSegment) GetConfidence() float32
func (*LabelSegment) GetSegment
func (x *LabelSegment) GetSegment() *VideoSegment
func (*LabelSegment) ProtoMessage
func (*LabelSegment) ProtoMessage()
func (*LabelSegment) ProtoReflect
func (x *LabelSegment) ProtoReflect() protoreflect.Message
func (*LabelSegment) Reset
func (x *LabelSegment) Reset()
func (*LabelSegment) String
func (x *LabelSegment) String() string
Likelihood
type Likelihood int32
Bucketized representation of likelihood.
Likelihood_LIKELIHOOD_UNSPECIFIED, Likelihood_VERY_UNLIKELY, Likelihood_UNLIKELY, Likelihood_POSSIBLE, Likelihood_LIKELY, Likelihood_VERY_LIKELY
const (
// Unspecified likelihood.
Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0
// Very unlikely.
Likelihood_VERY_UNLIKELY Likelihood = 1
// Unlikely.
Likelihood_UNLIKELY Likelihood = 2
// Possible.
Likelihood_POSSIBLE Likelihood = 3
// Likely.
Likelihood_LIKELY Likelihood = 4
// Very likely.
Likelihood_VERY_LIKELY Likelihood = 5
)
func (Likelihood) Descriptor
func (Likelihood) Descriptor() protoreflect.EnumDescriptor
func (Likelihood) Enum
func (x Likelihood) Enum() *Likelihood
func (Likelihood) EnumDescriptor
func (Likelihood) EnumDescriptor() ([]byte, []int)
Deprecated: Use Likelihood.Descriptor instead.
func (Likelihood) Number
func (x Likelihood) Number() protoreflect.EnumNumber
func (Likelihood) String
func (x Likelihood) String() string
func (Likelihood) Type
func (Likelihood) Type() protoreflect.EnumType
LogoRecognitionAnnotation
type LogoRecognitionAnnotation struct {
// Entity category information to specify the logo class that all the logo
// tracks within this LogoRecognitionAnnotation are recognized as.
Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
// All logo tracks where the recognized logo appears. Each track corresponds
// to one logo instance appearing in consecutive frames.
Tracks []*Track `protobuf:"bytes,2,rep,name=tracks,proto3" json:"tracks,omitempty"`
// All video segments where the recognized logo appears. There might be
// multiple instances of the same logo class appearing in one VideoSegment.
Segments []*VideoSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
// contains filtered or unexported fields
}
Annotation corresponding to one detected, tracked and recognized logo class.
func (*LogoRecognitionAnnotation) Descriptor
func (*LogoRecognitionAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use LogoRecognitionAnnotation.ProtoReflect.Descriptor instead.
func (*LogoRecognitionAnnotation) GetEntity
func (x *LogoRecognitionAnnotation) GetEntity() *Entity
func (*LogoRecognitionAnnotation) GetSegments
func (x *LogoRecognitionAnnotation) GetSegments() []*VideoSegment
func (*LogoRecognitionAnnotation) GetTracks
func (x *LogoRecognitionAnnotation) GetTracks() []*Track
func (*LogoRecognitionAnnotation) ProtoMessage
func (*LogoRecognitionAnnotation) ProtoMessage()
func (*LogoRecognitionAnnotation) ProtoReflect
func (x *LogoRecognitionAnnotation) ProtoReflect() protoreflect.Message
func (*LogoRecognitionAnnotation) Reset
func (x *LogoRecognitionAnnotation) Reset()
func (*LogoRecognitionAnnotation) String
func (x *LogoRecognitionAnnotation) String() string
NormalizedBoundingBox
type NormalizedBoundingBox struct {
// Left X coordinate.
Left float32 `protobuf:"fixed32,1,opt,name=left,proto3" json:"left,omitempty"`
// Top Y coordinate.
Top float32 `protobuf:"fixed32,2,opt,name=top,proto3" json:"top,omitempty"`
// Right X coordinate.
Right float32 `protobuf:"fixed32,3,opt,name=right,proto3" json:"right,omitempty"`
// Bottom Y coordinate.
Bottom float32 `protobuf:"fixed32,4,opt,name=bottom,proto3" json:"bottom,omitempty"`
// contains filtered or unexported fields
}
Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1].
func (*NormalizedBoundingBox) Descriptor
func (*NormalizedBoundingBox) Descriptor() ([]byte, []int)
Deprecated: Use NormalizedBoundingBox.ProtoReflect.Descriptor instead.
func (*NormalizedBoundingBox) GetBottom
func (x *NormalizedBoundingBox) GetBottom() float32
func (*NormalizedBoundingBox) GetLeft
func (x *NormalizedBoundingBox) GetLeft() float32
func (*NormalizedBoundingBox) GetRight
func (x *NormalizedBoundingBox) GetRight() float32
func (*NormalizedBoundingBox) GetTop
func (x *NormalizedBoundingBox) GetTop() float32
func (*NormalizedBoundingBox) ProtoMessage
func (*NormalizedBoundingBox) ProtoMessage()
func (*NormalizedBoundingBox) ProtoReflect
func (x *NormalizedBoundingBox) ProtoReflect() protoreflect.Message
func (*NormalizedBoundingBox) Reset
func (x *NormalizedBoundingBox) Reset()
func (*NormalizedBoundingBox) String
func (x *NormalizedBoundingBox) String() string
NormalizedBoundingPoly
type NormalizedBoundingPoly struct {
// Normalized vertices of the bounding polygon.
Vertices []*NormalizedVertex `protobuf:"bytes,1,rep,name=vertices,proto3" json:"vertices,omitempty"`
// contains filtered or unexported fields
}
Normalized bounding polygon for text (that might not be aligned with axis). Contains list of the corner points in clockwise order starting from top-left corner. For example, for a rectangular bounding box: When the text is horizontal it might look like:
0----1
| |
3----2
When it's clockwise rotated 180 degrees around the top-left corner it becomes:
2----3
| |
1----0
and the vertex order will still be (0, 1, 2, 3). Note that values can be less than 0, or greater than 1 due to trigonometric calculations for location of the box.
func (*NormalizedBoundingPoly) Descriptor
func (*NormalizedBoundingPoly) Descriptor() ([]byte, []int)
Deprecated: Use NormalizedBoundingPoly.ProtoReflect.Descriptor instead.
func (*NormalizedBoundingPoly) GetVertices
func (x *NormalizedBoundingPoly) GetVertices() []*NormalizedVertex
func (*NormalizedBoundingPoly) ProtoMessage
func (*NormalizedBoundingPoly) ProtoMessage()
func (*NormalizedBoundingPoly) ProtoReflect
func (x *NormalizedBoundingPoly) ProtoReflect() protoreflect.Message
func (*NormalizedBoundingPoly) Reset
func (x *NormalizedBoundingPoly) Reset()
func (*NormalizedBoundingPoly) String
func (x *NormalizedBoundingPoly) String() string
NormalizedVertex
type NormalizedVertex struct {
// X coordinate.
X float32 `protobuf:"fixed32,1,opt,name=x,proto3" json:"x,omitempty"`
// Y coordinate.
Y float32 `protobuf:"fixed32,2,opt,name=y,proto3" json:"y,omitempty"`
// contains filtered or unexported fields
}
A vertex represents a 2D point in the image. NOTE: the normalized vertex coordinates are relative to the original image and range from 0 to 1.
func (*NormalizedVertex) Descriptor
func (*NormalizedVertex) Descriptor() ([]byte, []int)
Deprecated: Use NormalizedVertex.ProtoReflect.Descriptor instead.
func (*NormalizedVertex) GetX
func (x *NormalizedVertex) GetX() float32
func (*NormalizedVertex) GetY
func (x *NormalizedVertex) GetY() float32
func (*NormalizedVertex) ProtoMessage
func (*NormalizedVertex) ProtoMessage()
func (*NormalizedVertex) ProtoReflect
func (x *NormalizedVertex) ProtoReflect() protoreflect.Message
func (*NormalizedVertex) Reset
func (x *NormalizedVertex) Reset()
func (*NormalizedVertex) String
func (x *NormalizedVertex) String() string
ObjectTrackingAnnotation
type ObjectTrackingAnnotation struct {
// Different representation of tracking info in non-streaming batch
// and streaming modes.
//
// Types that are assignable to TrackInfo:
//
// *ObjectTrackingAnnotation_Segment
// *ObjectTrackingAnnotation_TrackId
TrackInfo isObjectTrackingAnnotation_TrackInfo `protobuf_oneof:"track_info"`
// Entity to specify the object category that this track is labeled as.
Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
// Object category's labeling confidence of this track.
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Information corresponding to all frames where this object track appears.
// Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
// messages in frames.
// Streaming mode: it can only be one ObjectTrackingFrame message in frames.
Frames []*ObjectTrackingFrame `protobuf:"bytes,2,rep,name=frames,proto3" json:"frames,omitempty"`
// Feature version.
Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
Annotations corresponding to one tracked object.
func (*ObjectTrackingAnnotation) Descriptor
func (*ObjectTrackingAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use ObjectTrackingAnnotation.ProtoReflect.Descriptor instead.
func (*ObjectTrackingAnnotation) GetConfidence
func (x *ObjectTrackingAnnotation) GetConfidence() float32
func (*ObjectTrackingAnnotation) GetEntity
func (x *ObjectTrackingAnnotation) GetEntity() *Entity
func (*ObjectTrackingAnnotation) GetFrames
func (x *ObjectTrackingAnnotation) GetFrames() []*ObjectTrackingFrame
func (*ObjectTrackingAnnotation) GetSegment
func (x *ObjectTrackingAnnotation) GetSegment() *VideoSegment
func (*ObjectTrackingAnnotation) GetTrackId
func (x *ObjectTrackingAnnotation) GetTrackId() int64
func (*ObjectTrackingAnnotation) GetTrackInfo
func (m *ObjectTrackingAnnotation) GetTrackInfo() isObjectTrackingAnnotation_TrackInfo
func (*ObjectTrackingAnnotation) GetVersion
func (x *ObjectTrackingAnnotation) GetVersion() string
func (*ObjectTrackingAnnotation) ProtoMessage
func (*ObjectTrackingAnnotation) ProtoMessage()
func (*ObjectTrackingAnnotation) ProtoReflect
func (x *ObjectTrackingAnnotation) ProtoReflect() protoreflect.Message
func (*ObjectTrackingAnnotation) Reset
func (x *ObjectTrackingAnnotation) Reset()
func (*ObjectTrackingAnnotation) String
func (x *ObjectTrackingAnnotation) String() string
ObjectTrackingAnnotation_Segment
type ObjectTrackingAnnotation_Segment struct {
// Non-streaming batch mode ONLY.
// Each object track corresponds to one video segment where it appears.
Segment *VideoSegment `protobuf:"bytes,3,opt,name=segment,proto3,oneof"`
}
ObjectTrackingAnnotation_TrackId
type ObjectTrackingAnnotation_TrackId struct {
// Streaming mode ONLY.
// In streaming mode, we do not know the end time of a tracked object
// before it is completed. Hence, there is no VideoSegment info returned.
// Instead, we provide a unique identifiable integer track_id so that
// the customers can correlate the results of the ongoing
// ObjectTrackingAnnotation of the same track_id over time.
TrackId int64 `protobuf:"varint,5,opt,name=track_id,json=trackId,proto3,oneof"`
}
ObjectTrackingConfig
type ObjectTrackingConfig struct {
// Model to use for object tracking.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}
Config for OBJECT_TRACKING.
func (*ObjectTrackingConfig) Descriptor
func (*ObjectTrackingConfig) Descriptor() ([]byte, []int)
Deprecated: Use ObjectTrackingConfig.ProtoReflect.Descriptor instead.
func (*ObjectTrackingConfig) GetModel
func (x *ObjectTrackingConfig) GetModel() string
func (*ObjectTrackingConfig) ProtoMessage
func (*ObjectTrackingConfig) ProtoMessage()
func (*ObjectTrackingConfig) ProtoReflect
func (x *ObjectTrackingConfig) ProtoReflect() protoreflect.Message
func (*ObjectTrackingConfig) Reset
func (x *ObjectTrackingConfig) Reset()
func (*ObjectTrackingConfig) String
func (x *ObjectTrackingConfig) String() string
ObjectTrackingFrame
type ObjectTrackingFrame struct {
// The normalized bounding box location of this object track for the frame.
NormalizedBoundingBox *NormalizedBoundingBox `protobuf:"bytes,1,opt,name=normalized_bounding_box,json=normalizedBoundingBox,proto3" json:"normalized_bounding_box,omitempty"`
// The timestamp of the frame in microseconds.
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// contains filtered or unexported fields
}
Video frame level annotations for object detection and tracking. This field stores per frame location, time offset, and confidence.
func (*ObjectTrackingFrame) Descriptor
func (*ObjectTrackingFrame) Descriptor() ([]byte, []int)
Deprecated: Use ObjectTrackingFrame.ProtoReflect.Descriptor instead.
func (*ObjectTrackingFrame) GetNormalizedBoundingBox
func (x *ObjectTrackingFrame) GetNormalizedBoundingBox() *NormalizedBoundingBox
func (*ObjectTrackingFrame) GetTimeOffset
func (x *ObjectTrackingFrame) GetTimeOffset() *durationpb.Duration
func (*ObjectTrackingFrame) ProtoMessage
func (*ObjectTrackingFrame) ProtoMessage()
func (*ObjectTrackingFrame) ProtoReflect
func (x *ObjectTrackingFrame) ProtoReflect() protoreflect.Message
func (*ObjectTrackingFrame) Reset
func (x *ObjectTrackingFrame) Reset()
func (*ObjectTrackingFrame) String
func (x *ObjectTrackingFrame) String() string
PersonDetectionAnnotation
type PersonDetectionAnnotation struct {
// The detected tracks of a person.
Tracks []*Track `protobuf:"bytes,1,rep,name=tracks,proto3" json:"tracks,omitempty"`
// Feature version.
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
Person detection annotation per video.
func (*PersonDetectionAnnotation) Descriptor
func (*PersonDetectionAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use PersonDetectionAnnotation.ProtoReflect.Descriptor instead.
func (*PersonDetectionAnnotation) GetTracks
func (x *PersonDetectionAnnotation) GetTracks() []*Track
func (*PersonDetectionAnnotation) GetVersion
func (x *PersonDetectionAnnotation) GetVersion() string
func (*PersonDetectionAnnotation) ProtoMessage
func (*PersonDetectionAnnotation) ProtoMessage()
func (*PersonDetectionAnnotation) ProtoReflect
func (x *PersonDetectionAnnotation) ProtoReflect() protoreflect.Message
func (*PersonDetectionAnnotation) Reset
func (x *PersonDetectionAnnotation) Reset()
func (*PersonDetectionAnnotation) String
func (x *PersonDetectionAnnotation) String() string
PersonDetectionConfig
type PersonDetectionConfig struct {
// Whether bounding boxes are included in the person detection annotation
// output.
IncludeBoundingBoxes bool `protobuf:"varint,1,opt,name=include_bounding_boxes,json=includeBoundingBoxes,proto3" json:"include_bounding_boxes,omitempty"`
// Whether to enable pose landmarks detection. Ignored if
// 'include_bounding_boxes' is set to false.
IncludePoseLandmarks bool `protobuf:"varint,2,opt,name=include_pose_landmarks,json=includePoseLandmarks,proto3" json:"include_pose_landmarks,omitempty"`
// Whether to enable person attributes detection, such as cloth color (black,
// blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
// etc.
// Ignored if 'include_bounding_boxes' is set to false.
IncludeAttributes bool `protobuf:"varint,3,opt,name=include_attributes,json=includeAttributes,proto3" json:"include_attributes,omitempty"`
// contains filtered or unexported fields
}
Config for PERSON_DETECTION.
func (*PersonDetectionConfig) Descriptor
func (*PersonDetectionConfig) Descriptor() ([]byte, []int)
Deprecated: Use PersonDetectionConfig.ProtoReflect.Descriptor instead.
func (*PersonDetectionConfig) GetIncludeAttributes
func (x *PersonDetectionConfig) GetIncludeAttributes() bool
func (*PersonDetectionConfig) GetIncludeBoundingBoxes
func (x *PersonDetectionConfig) GetIncludeBoundingBoxes() bool
func (*PersonDetectionConfig) GetIncludePoseLandmarks
func (x *PersonDetectionConfig) GetIncludePoseLandmarks() bool
func (*PersonDetectionConfig) ProtoMessage
func (*PersonDetectionConfig) ProtoMessage()
func (*PersonDetectionConfig) ProtoReflect
func (x *PersonDetectionConfig) ProtoReflect() protoreflect.Message
func (*PersonDetectionConfig) Reset
func (x *PersonDetectionConfig) Reset()
func (*PersonDetectionConfig) String
func (x *PersonDetectionConfig) String() string
ShotChangeDetectionConfig
type ShotChangeDetectionConfig struct {
// Model to use for shot change detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}
Config for SHOT_CHANGE_DETECTION.
func (*ShotChangeDetectionConfig) Descriptor
func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int)
Deprecated: Use ShotChangeDetectionConfig.ProtoReflect.Descriptor instead.
func (*ShotChangeDetectionConfig) GetModel
func (x *ShotChangeDetectionConfig) GetModel() string
func (*ShotChangeDetectionConfig) ProtoMessage
func (*ShotChangeDetectionConfig) ProtoMessage()
func (*ShotChangeDetectionConfig) ProtoReflect
func (x *ShotChangeDetectionConfig) ProtoReflect() protoreflect.Message
func (*ShotChangeDetectionConfig) Reset
func (x *ShotChangeDetectionConfig) Reset()
func (*ShotChangeDetectionConfig) String
func (x *ShotChangeDetectionConfig) String() string
SpeechContext
type SpeechContext struct {
// Optional. A list of strings containing words and phrases "hints" so that
// the speech recognition is more likely to recognize them. This can be used
// to improve the accuracy for specific words and phrases, for example, if
// specific commands are typically spoken by the user. This can also be used
// to add additional words to the vocabulary of the recognizer. See
// [usage limits](https://cloud.google.com/speech/limits#content).
Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
// contains filtered or unexported fields
}
Provides "hints" to the speech recognizer to favor specific words and phrases in the results.
func (*SpeechContext) Descriptor
func (*SpeechContext) Descriptor() ([]byte, []int)
Deprecated: Use SpeechContext.ProtoReflect.Descriptor instead.
func (*SpeechContext) GetPhrases
func (x *SpeechContext) GetPhrases() []string
func (*SpeechContext) ProtoMessage
func (*SpeechContext) ProtoMessage()
func (*SpeechContext) ProtoReflect
func (x *SpeechContext) ProtoReflect() protoreflect.Message
func (*SpeechContext) Reset
func (x *SpeechContext) Reset()
func (*SpeechContext) String
func (x *SpeechContext) String() string
SpeechRecognitionAlternative
type SpeechRecognitionAlternative struct {
// Transcript text representing the words that the user spoke.
Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
// Output only. The confidence estimate between 0.0 and 1.0. A higher number
// indicates an estimated greater likelihood that the recognized words are
// correct. This field is set only for the top alternative.
// This field is not guaranteed to be accurate and users should not rely on it
// to be always provided.
// The default of 0.0 is a sentinel value indicating `confidence` was not set.
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Output only. A list of word-specific information for each recognized word.
// Note: When `enable_speaker_diarization` is set to true, you will see all
// the words from the beginning of the audio.
Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
// contains filtered or unexported fields
}
Alternative hypotheses (a.k.a. n-best list).
func (*SpeechRecognitionAlternative) Descriptor
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int)
Deprecated: Use SpeechRecognitionAlternative.ProtoReflect.Descriptor instead.
func (*SpeechRecognitionAlternative) GetConfidence
func (x *SpeechRecognitionAlternative) GetConfidence() float32
func (*SpeechRecognitionAlternative) GetTranscript
func (x *SpeechRecognitionAlternative) GetTranscript() string
func (*SpeechRecognitionAlternative) GetWords
func (x *SpeechRecognitionAlternative) GetWords() []*WordInfo
func (*SpeechRecognitionAlternative) ProtoMessage
func (*SpeechRecognitionAlternative) ProtoMessage()
func (*SpeechRecognitionAlternative) ProtoReflect
func (x *SpeechRecognitionAlternative) ProtoReflect() protoreflect.Message
func (*SpeechRecognitionAlternative) Reset
func (x *SpeechRecognitionAlternative) Reset()
func (*SpeechRecognitionAlternative) String
func (x *SpeechRecognitionAlternative) String() string
SpeechTranscription
type SpeechTranscription struct {
// May contain one or more recognition hypotheses (up to the maximum specified
// in `max_alternatives`). These alternatives are ordered in terms of
// accuracy, with the top (first) alternative being the most probable, as
// ranked by the recognizer.
Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
// Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
// language tag of the language in this result. This language code was
// detected to have the most likelihood of being spoken in the audio.
LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// contains filtered or unexported fields
}
A speech recognition result corresponding to a portion of the audio.
func (*SpeechTranscription) Descriptor
func (*SpeechTranscription) Descriptor() ([]byte, []int)
Deprecated: Use SpeechTranscription.ProtoReflect.Descriptor instead.
func (*SpeechTranscription) GetAlternatives
func (x *SpeechTranscription) GetAlternatives() []*SpeechRecognitionAlternative
func (*SpeechTranscription) GetLanguageCode
func (x *SpeechTranscription) GetLanguageCode() string
func (*SpeechTranscription) ProtoMessage
func (*SpeechTranscription) ProtoMessage()
func (*SpeechTranscription) ProtoReflect
func (x *SpeechTranscription) ProtoReflect() protoreflect.Message
func (*SpeechTranscription) Reset
func (x *SpeechTranscription) Reset()
func (*SpeechTranscription) String
func (x *SpeechTranscription) String() string
SpeechTranscriptionConfig
type SpeechTranscriptionConfig struct {
LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
MaxAlternatives int32 `protobuf:"varint,2,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
FilterProfanity bool `protobuf:"varint,3,opt,name=filter_profanity,json=filterProfanity,proto3" json:"filter_profanity,omitempty"`
SpeechContexts []*SpeechContext `protobuf:"bytes,4,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
EnableAutomaticPunctuation bool "" /* 142 byte string literal not displayed */
AudioTracks []int32 `protobuf:"varint,6,rep,packed,name=audio_tracks,json=audioTracks,proto3" json:"audio_tracks,omitempty"`
EnableSpeakerDiarization bool "" /* 136 byte string literal not displayed */
DiarizationSpeakerCount int32 "" /* 133 byte string literal not displayed */
EnableWordConfidence bool `protobuf:"varint,9,opt,name=enable_word_confidence,json=enableWordConfidence,proto3" json:"enable_word_confidence,omitempty"`
}
Config for SPEECH_TRANSCRIPTION.
func (*SpeechTranscriptionConfig) Descriptor
func (*SpeechTranscriptionConfig) Descriptor() ([]byte, []int)
Deprecated: Use SpeechTranscriptionConfig.ProtoReflect.Descriptor instead.
func (*SpeechTranscriptionConfig) GetAudioTracks
func (x *SpeechTranscriptionConfig) GetAudioTracks() []int32
func (*SpeechTranscriptionConfig) GetDiarizationSpeakerCount
func (x *SpeechTranscriptionConfig) GetDiarizationSpeakerCount() int32
func (*SpeechTranscriptionConfig) GetEnableAutomaticPunctuation
func (x *SpeechTranscriptionConfig) GetEnableAutomaticPunctuation() bool
func (*SpeechTranscriptionConfig) GetEnableSpeakerDiarization
func (x *SpeechTranscriptionConfig) GetEnableSpeakerDiarization() bool
func (*SpeechTranscriptionConfig) GetEnableWordConfidence
func (x *SpeechTranscriptionConfig) GetEnableWordConfidence() bool
func (*SpeechTranscriptionConfig) GetFilterProfanity
func (x *SpeechTranscriptionConfig) GetFilterProfanity() bool
func (*SpeechTranscriptionConfig) GetLanguageCode
func (x *SpeechTranscriptionConfig) GetLanguageCode() string
func (*SpeechTranscriptionConfig) GetMaxAlternatives
func (x *SpeechTranscriptionConfig) GetMaxAlternatives() int32
func (*SpeechTranscriptionConfig) GetSpeechContexts
func (x *SpeechTranscriptionConfig) GetSpeechContexts() []*SpeechContext
func (*SpeechTranscriptionConfig) ProtoMessage
func (*SpeechTranscriptionConfig) ProtoMessage()
func (*SpeechTranscriptionConfig) ProtoReflect
func (x *SpeechTranscriptionConfig) ProtoReflect() protoreflect.Message
func (*SpeechTranscriptionConfig) Reset
func (x *SpeechTranscriptionConfig) Reset()
func (*SpeechTranscriptionConfig) String
func (x *SpeechTranscriptionConfig) String() string
TextAnnotation
type TextAnnotation struct {
// The detected text.
Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
// All video segments where OCR detected text appears.
Segments []*TextSegment `protobuf:"bytes,2,rep,name=segments,proto3" json:"segments,omitempty"`
// Feature version.
Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection.
func (*TextAnnotation) Descriptor
func (*TextAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use TextAnnotation.ProtoReflect.Descriptor instead.
func (*TextAnnotation) GetSegments
func (x *TextAnnotation) GetSegments() []*TextSegment
func (*TextAnnotation) GetText
func (x *TextAnnotation) GetText() string
func (*TextAnnotation) GetVersion
func (x *TextAnnotation) GetVersion() string
func (*TextAnnotation) ProtoMessage
func (*TextAnnotation) ProtoMessage()
func (*TextAnnotation) ProtoReflect
func (x *TextAnnotation) ProtoReflect() protoreflect.Message
func (*TextAnnotation) Reset
func (x *TextAnnotation) Reset()
func (*TextAnnotation) String
func (x *TextAnnotation) String() string
TextDetectionConfig
type TextDetectionConfig struct {
// Language hint can be specified if the language to be detected is known a
// priori. It can increase the accuracy of the detection. Language hint must
// be language code in BCP-47 format.
//
// Automatic language detection is performed if no hint is provided.
LanguageHints []string `protobuf:"bytes,1,rep,name=language_hints,json=languageHints,proto3" json:"language_hints,omitempty"`
// Model to use for text detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,2,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}
Config for TEXT_DETECTION.
func (*TextDetectionConfig) Descriptor
func (*TextDetectionConfig) Descriptor() ([]byte, []int)
Deprecated: Use TextDetectionConfig.ProtoReflect.Descriptor instead.
func (*TextDetectionConfig) GetLanguageHints
func (x *TextDetectionConfig) GetLanguageHints() []string
func (*TextDetectionConfig) GetModel
func (x *TextDetectionConfig) GetModel() string
func (*TextDetectionConfig) ProtoMessage
func (*TextDetectionConfig) ProtoMessage()
func (*TextDetectionConfig) ProtoReflect
func (x *TextDetectionConfig) ProtoReflect() protoreflect.Message
func (*TextDetectionConfig) Reset
func (x *TextDetectionConfig) Reset()
func (*TextDetectionConfig) String
func (x *TextDetectionConfig) String() string
TextFrame
type TextFrame struct {
// Bounding polygon of the detected text for this frame.
RotatedBoundingBox *NormalizedBoundingPoly `protobuf:"bytes,1,opt,name=rotated_bounding_box,json=rotatedBoundingBox,proto3" json:"rotated_bounding_box,omitempty"`
// Timestamp of this frame.
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// contains filtered or unexported fields
}
Video frame level annotation results for text annotation (OCR). Contains information regarding timestamp and bounding box locations for the frames containing detected OCR text snippets.
func (*TextFrame) Descriptor
func (*TextFrame) Descriptor() ([]byte, []int)
Deprecated: Use TextFrame.ProtoReflect.Descriptor instead.
func (*TextFrame) GetRotatedBoundingBox
func (x *TextFrame) GetRotatedBoundingBox() *NormalizedBoundingPoly
func (*TextFrame) GetTimeOffset
func (x *TextFrame) GetTimeOffset() *durationpb.Duration
func (*TextFrame) ProtoMessage
func (*TextFrame) ProtoMessage()
func (*TextFrame) ProtoReflect
func (x *TextFrame) ProtoReflect() protoreflect.Message
func (*TextFrame) Reset
func (x *TextFrame) Reset()
func (*TextFrame) String
func (x *TextFrame) String() string
TextSegment
type TextSegment struct {
// Video segment where a text snippet was detected.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// Confidence for the track of detected text. It is calculated as the highest
// over all frames where OCR detected text appears.
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Information related to the frames where OCR detected text appears.
Frames []*TextFrame `protobuf:"bytes,3,rep,name=frames,proto3" json:"frames,omitempty"`
// contains filtered or unexported fields
}
Video segment level annotation results for text detection.
func (*TextSegment) Descriptor
func (*TextSegment) Descriptor() ([]byte, []int)
Deprecated: Use TextSegment.ProtoReflect.Descriptor instead.
func (*TextSegment) GetConfidence
func (x *TextSegment) GetConfidence() float32
func (*TextSegment) GetFrames
func (x *TextSegment) GetFrames() []*TextFrame
func (*TextSegment) GetSegment
func (x *TextSegment) GetSegment() *VideoSegment
func (*TextSegment) ProtoMessage
func (*TextSegment) ProtoMessage()
func (*TextSegment) ProtoReflect
func (x *TextSegment) ProtoReflect() protoreflect.Message
func (*TextSegment) Reset
func (x *TextSegment) Reset()
func (*TextSegment) String
func (x *TextSegment) String() string
TimestampedObject
type TimestampedObject struct {
// Normalized Bounding box in a frame, where the object is located.
NormalizedBoundingBox *NormalizedBoundingBox `protobuf:"bytes,1,opt,name=normalized_bounding_box,json=normalizedBoundingBox,proto3" json:"normalized_bounding_box,omitempty"`
// Time-offset, relative to the beginning of the video,
// corresponding to the video frame for this object.
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// Optional. The attributes of the object in the bounding box.
Attributes []*DetectedAttribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
// Optional. The detected landmarks.
Landmarks []*DetectedLandmark `protobuf:"bytes,4,rep,name=landmarks,proto3" json:"landmarks,omitempty"`
// contains filtered or unexported fields
}
For tracking related features. An object at time_offset with attributes, and located with normalized_bounding_box.
func (*TimestampedObject) Descriptor
func (*TimestampedObject) Descriptor() ([]byte, []int)
Deprecated: Use TimestampedObject.ProtoReflect.Descriptor instead.
func (*TimestampedObject) GetAttributes
func (x *TimestampedObject) GetAttributes() []*DetectedAttribute
func (*TimestampedObject) GetLandmarks
func (x *TimestampedObject) GetLandmarks() []*DetectedLandmark
func (*TimestampedObject) GetNormalizedBoundingBox
func (x *TimestampedObject) GetNormalizedBoundingBox() *NormalizedBoundingBox
func (*TimestampedObject) GetTimeOffset
func (x *TimestampedObject) GetTimeOffset() *durationpb.Duration
func (*TimestampedObject) ProtoMessage
func (*TimestampedObject) ProtoMessage()
func (*TimestampedObject) ProtoReflect
func (x *TimestampedObject) ProtoReflect() protoreflect.Message
func (*TimestampedObject) Reset
func (x *TimestampedObject) Reset()
func (*TimestampedObject) String
func (x *TimestampedObject) String() string
Track
type Track struct {
// Video segment of a track.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// The object with timestamp and attributes per frame in the track.
TimestampedObjects []*TimestampedObject `protobuf:"bytes,2,rep,name=timestamped_objects,json=timestampedObjects,proto3" json:"timestamped_objects,omitempty"`
// Optional. Attributes in the track level.
Attributes []*DetectedAttribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
// Optional. The confidence score of the tracked object.
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}
A track of an object instance.
func (*Track) Descriptor
func (*Track) Descriptor() ([]byte, []int)
Deprecated: Use Track.ProtoReflect.Descriptor instead.
func (*Track) GetAttributes
func (x *Track) GetAttributes() []*DetectedAttribute
func (*Track) GetConfidence
func (x *Track) GetConfidence() float32
func (*Track) GetSegment
func (x *Track) GetSegment() *VideoSegment
func (*Track) GetTimestampedObjects
func (x *Track) GetTimestampedObjects() []*TimestampedObject
func (*Track) ProtoMessage
func (*Track) ProtoMessage()
func (*Track) ProtoReflect
func (x *Track) ProtoReflect() protoreflect.Message
func (*Track) Reset
func (x *Track) Reset()
func (*Track) String
func (x *Track) String() string
UnimplementedVideoIntelligenceServiceServer
type UnimplementedVideoIntelligenceServiceServer struct {
}
UnimplementedVideoIntelligenceServiceServer can be embedded to have forward compatible implementations.
func (*UnimplementedVideoIntelligenceServiceServer) AnnotateVideo
func (*UnimplementedVideoIntelligenceServiceServer) AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunning.Operation, error)
VideoAnnotationProgress
type VideoAnnotationProgress struct {
// Video file location in
// [Cloud Storage](https://cloud.google.com/storage/).
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// Approximate percentage processed thus far. Guaranteed to be
// 100 when fully processed.
ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
// Time when the request was received.
StartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Time of the most recent update.
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// Specifies which feature is being tracked if the request contains more than
// one feature.
Feature Feature `protobuf:"varint,5,opt,name=feature,proto3,enum=google.cloud.videointelligence.v1.Feature" json:"feature,omitempty"`
// Specifies which segment is being tracked if the request contains more than
// one segment.
Segment *VideoSegment `protobuf:"bytes,6,opt,name=segment,proto3" json:"segment,omitempty"`
// contains filtered or unexported fields
}
Annotation progress for a single video.
func (*VideoAnnotationProgress) Descriptor
func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)
Deprecated: Use VideoAnnotationProgress.ProtoReflect.Descriptor instead.
func (*VideoAnnotationProgress) GetFeature
func (x *VideoAnnotationProgress) GetFeature() Feature
func (*VideoAnnotationProgress) GetInputUri
func (x *VideoAnnotationProgress) GetInputUri() string
func (*VideoAnnotationProgress) GetProgressPercent
func (x *VideoAnnotationProgress) GetProgressPercent() int32
func (*VideoAnnotationProgress) GetSegment
func (x *VideoAnnotationProgress) GetSegment() *VideoSegment
func (*VideoAnnotationProgress) GetStartTime
func (x *VideoAnnotationProgress) GetStartTime() *timestamppb.Timestamp
func (*VideoAnnotationProgress) GetUpdateTime
func (x *VideoAnnotationProgress) GetUpdateTime() *timestamppb.Timestamp
func (*VideoAnnotationProgress) ProtoMessage
func (*VideoAnnotationProgress) ProtoMessage()
func (*VideoAnnotationProgress) ProtoReflect
func (x *VideoAnnotationProgress) ProtoReflect() protoreflect.Message
func (*VideoAnnotationProgress) Reset
func (x *VideoAnnotationProgress) Reset()
func (*VideoAnnotationProgress) String
func (x *VideoAnnotationProgress) String() string
VideoAnnotationResults
type VideoAnnotationResults struct {
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
Segment *VideoSegment `protobuf:"bytes,10,opt,name=segment,proto3" json:"segment,omitempty"`
SegmentLabelAnnotations []*LabelAnnotation "" /* 132 byte string literal not displayed */
SegmentPresenceLabelAnnotations []*LabelAnnotation "" /* 159 byte string literal not displayed */
ShotLabelAnnotations []*LabelAnnotation `protobuf:"bytes,3,rep,name=shot_label_annotations,json=shotLabelAnnotations,proto3" json:"shot_label_annotations,omitempty"`
ShotPresenceLabelAnnotations []*LabelAnnotation "" /* 150 byte string literal not displayed */
FrameLabelAnnotations []*LabelAnnotation `protobuf:"bytes,4,rep,name=frame_label_annotations,json=frameLabelAnnotations,proto3" json:"frame_label_annotations,omitempty"`
FaceAnnotations []*FaceAnnotation `protobuf:"bytes,5,rep,name=face_annotations,json=faceAnnotations,proto3" json:"face_annotations,omitempty"`
FaceDetectionAnnotations []*FaceDetectionAnnotation "" /* 136 byte string literal not displayed */
ShotAnnotations []*VideoSegment `protobuf:"bytes,6,rep,name=shot_annotations,json=shotAnnotations,proto3" json:"shot_annotations,omitempty"`
ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,7,opt,name=explicit_annotation,json=explicitAnnotation,proto3" json:"explicit_annotation,omitempty"`
SpeechTranscriptions []*SpeechTranscription `protobuf:"bytes,11,rep,name=speech_transcriptions,json=speechTranscriptions,proto3" json:"speech_transcriptions,omitempty"`
TextAnnotations []*TextAnnotation `protobuf:"bytes,12,rep,name=text_annotations,json=textAnnotations,proto3" json:"text_annotations,omitempty"`
ObjectAnnotations []*ObjectTrackingAnnotation `protobuf:"bytes,14,rep,name=object_annotations,json=objectAnnotations,proto3" json:"object_annotations,omitempty"`
LogoRecognitionAnnotations []*LogoRecognitionAnnotation "" /* 142 byte string literal not displayed */
PersonDetectionAnnotations []*PersonDetectionAnnotation "" /* 142 byte string literal not displayed */
Error *status.Status `protobuf:"bytes,9,opt,name=error,proto3" json:"error,omitempty"`
}
Annotation results for a single video.
func (*VideoAnnotationResults) Descriptor
func (*VideoAnnotationResults) Descriptor() ([]byte, []int)
Deprecated: Use VideoAnnotationResults.ProtoReflect.Descriptor instead.
func (*VideoAnnotationResults) GetError
func (x *VideoAnnotationResults) GetError() *status.Status
func (*VideoAnnotationResults) GetExplicitAnnotation
func (x *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation
func (*VideoAnnotationResults) GetFaceAnnotations
func (x *VideoAnnotationResults) GetFaceAnnotations() []*FaceAnnotation
Deprecated: Do not use.
func (*VideoAnnotationResults) GetFaceDetectionAnnotations
func (x *VideoAnnotationResults) GetFaceDetectionAnnotations() []*FaceDetectionAnnotation
func (*VideoAnnotationResults) GetFrameLabelAnnotations
func (x *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetInputUri
func (x *VideoAnnotationResults) GetInputUri() string
func (*VideoAnnotationResults) GetLogoRecognitionAnnotations
func (x *VideoAnnotationResults) GetLogoRecognitionAnnotations() []*LogoRecognitionAnnotation
func (*VideoAnnotationResults) GetObjectAnnotations
func (x *VideoAnnotationResults) GetObjectAnnotations() []*ObjectTrackingAnnotation
func (*VideoAnnotationResults) GetPersonDetectionAnnotations
func (x *VideoAnnotationResults) GetPersonDetectionAnnotations() []*PersonDetectionAnnotation
func (*VideoAnnotationResults) GetSegment
func (x *VideoAnnotationResults) GetSegment() *VideoSegment
func (*VideoAnnotationResults) GetSegmentLabelAnnotations
func (x *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetSegmentPresenceLabelAnnotations
func (x *VideoAnnotationResults) GetSegmentPresenceLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetShotAnnotations
func (x *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment
func (*VideoAnnotationResults) GetShotLabelAnnotations
func (x *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetShotPresenceLabelAnnotations
func (x *VideoAnnotationResults) GetShotPresenceLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetSpeechTranscriptions
func (x *VideoAnnotationResults) GetSpeechTranscriptions() []*SpeechTranscription
func (*VideoAnnotationResults) GetTextAnnotations
func (x *VideoAnnotationResults) GetTextAnnotations() []*TextAnnotation
func (*VideoAnnotationResults) ProtoMessage
func (*VideoAnnotationResults) ProtoMessage()
func (*VideoAnnotationResults) ProtoReflect
func (x *VideoAnnotationResults) ProtoReflect() protoreflect.Message
func (*VideoAnnotationResults) Reset
func (x *VideoAnnotationResults) Reset()
func (*VideoAnnotationResults) String
func (x *VideoAnnotationResults) String() string
VideoContext
type VideoContext struct {
Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"`
LabelDetectionConfig *LabelDetectionConfig `protobuf:"bytes,2,opt,name=label_detection_config,json=labelDetectionConfig,proto3" json:"label_detection_config,omitempty"`
ShotChangeDetectionConfig *ShotChangeDetectionConfig "" /* 140 byte string literal not displayed */
ExplicitContentDetectionConfig *ExplicitContentDetectionConfig "" /* 155 byte string literal not displayed */
FaceDetectionConfig *FaceDetectionConfig `protobuf:"bytes,5,opt,name=face_detection_config,json=faceDetectionConfig,proto3" json:"face_detection_config,omitempty"`
SpeechTranscriptionConfig *SpeechTranscriptionConfig "" /* 138 byte string literal not displayed */
TextDetectionConfig *TextDetectionConfig `protobuf:"bytes,8,opt,name=text_detection_config,json=textDetectionConfig,proto3" json:"text_detection_config,omitempty"`
PersonDetectionConfig *PersonDetectionConfig "" /* 127 byte string literal not displayed */
ObjectTrackingConfig *ObjectTrackingConfig `protobuf:"bytes,13,opt,name=object_tracking_config,json=objectTrackingConfig,proto3" json:"object_tracking_config,omitempty"`
}
Video context and/or feature-specific parameters.
func (*VideoContext) Descriptor
func (*VideoContext) Descriptor() ([]byte, []int)
Deprecated: Use VideoContext.ProtoReflect.Descriptor instead.
func (*VideoContext) GetExplicitContentDetectionConfig
func (x *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig
func (*VideoContext) GetFaceDetectionConfig
func (x *VideoContext) GetFaceDetectionConfig() *FaceDetectionConfig
func (*VideoContext) GetLabelDetectionConfig
func (x *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig
func (*VideoContext) GetObjectTrackingConfig
func (x *VideoContext) GetObjectTrackingConfig() *ObjectTrackingConfig
func (*VideoContext) GetPersonDetectionConfig
func (x *VideoContext) GetPersonDetectionConfig() *PersonDetectionConfig
func (*VideoContext) GetSegments
func (x *VideoContext) GetSegments() []*VideoSegment
func (*VideoContext) GetShotChangeDetectionConfig
func (x *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig
func (*VideoContext) GetSpeechTranscriptionConfig
func (x *VideoContext) GetSpeechTranscriptionConfig() *SpeechTranscriptionConfig
func (*VideoContext) GetTextDetectionConfig
func (x *VideoContext) GetTextDetectionConfig() *TextDetectionConfig
func (*VideoContext) ProtoMessage
func (*VideoContext) ProtoMessage()
func (*VideoContext) ProtoReflect
func (x *VideoContext) ProtoReflect() protoreflect.Message
func (*VideoContext) Reset
func (x *VideoContext) Reset()
func (*VideoContext) String
func (x *VideoContext) String() string
VideoIntelligenceServiceClient
type VideoIntelligenceServiceClient interface {
// Performs asynchronous video annotation. Progress and results can be
// retrieved through the `google.longrunning.Operations` interface.
// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
// `Operation.response` contains `AnnotateVideoResponse` (results).
AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}
VideoIntelligenceServiceClient is the client API for VideoIntelligenceService service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewVideoIntelligenceServiceClient
func NewVideoIntelligenceServiceClient(cc grpc.ClientConnInterface) VideoIntelligenceServiceClient
VideoIntelligenceServiceServer
type VideoIntelligenceServiceServer interface {
// Performs asynchronous video annotation. Progress and results can be
// retrieved through the `google.longrunning.Operations` interface.
// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
// `Operation.response` contains `AnnotateVideoResponse` (results).
AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunning.Operation, error)
}
VideoIntelligenceServiceServer is the server API for VideoIntelligenceService service.
VideoSegment
type VideoSegment struct {
// Time-offset, relative to the beginning of the video,
// corresponding to the start of the segment (inclusive).
StartTimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=start_time_offset,json=startTimeOffset,proto3" json:"start_time_offset,omitempty"`
// Time-offset, relative to the beginning of the video,
// corresponding to the end of the segment (inclusive).
EndTimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=end_time_offset,json=endTimeOffset,proto3" json:"end_time_offset,omitempty"`
// contains filtered or unexported fields
}
Video segment.
func (*VideoSegment) Descriptor
func (*VideoSegment) Descriptor() ([]byte, []int)
Deprecated: Use VideoSegment.ProtoReflect.Descriptor instead.
func (*VideoSegment) GetEndTimeOffset
func (x *VideoSegment) GetEndTimeOffset() *durationpb.Duration
func (*VideoSegment) GetStartTimeOffset
func (x *VideoSegment) GetStartTimeOffset() *durationpb.Duration
func (*VideoSegment) ProtoMessage
func (*VideoSegment) ProtoMessage()
func (*VideoSegment) ProtoReflect
func (x *VideoSegment) ProtoReflect() protoreflect.Message
func (*VideoSegment) Reset
func (x *VideoSegment) Reset()
func (*VideoSegment) String
func (x *VideoSegment) String() string
WordInfo
type WordInfo struct {
// Time offset relative to the beginning of the audio, and
// corresponding to the start of the spoken word. This field is only set if
// `enable_word_time_offsets=true` and only in the top hypothesis. This is an
// experimental feature and the accuracy of the time offset can vary.
StartTime *durationpb.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Time offset relative to the beginning of the audio, and
// corresponding to the end of the spoken word. This field is only set if
// `enable_word_time_offsets=true` and only in the top hypothesis. This is an
// experimental feature and the accuracy of the time offset can vary.
EndTime *durationpb.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
// The word corresponding to this set of information.
Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
// Output only. The confidence estimate between 0.0 and 1.0. A higher number
// indicates an estimated greater likelihood that the recognized words are
// correct. This field is set only for the top alternative.
// This field is not guaranteed to be accurate and users should not rely on it
// to be always provided.
// The default of 0.0 is a sentinel value indicating `confidence` was not set.
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Output only. A distinct integer value is assigned for every speaker within
// the audio. This field specifies which one of those speakers was detected to
// have spoken this word. Value ranges from 1 up to diarization_speaker_count,
// and is only set if speaker diarization is enabled.
SpeakerTag int32 `protobuf:"varint,5,opt,name=speaker_tag,json=speakerTag,proto3" json:"speaker_tag,omitempty"`
// contains filtered or unexported fields
}
Word-specific information for recognized words. Word information is only
included in the response when certain request parameters are set, such
as `enable_word_time_offsets`.
func (*WordInfo) Descriptor
Deprecated: Use WordInfo.ProtoReflect.Descriptor instead.
func (*WordInfo) GetConfidence
func (*WordInfo) GetEndTime
func (x *WordInfo) GetEndTime() *durationpb.Duration
func (*WordInfo) GetSpeakerTag
func (*WordInfo) GetStartTime
func (x *WordInfo) GetStartTime() *durationpb.Duration
func (*WordInfo) GetWord
func (*WordInfo) ProtoMessage
func (*WordInfo) ProtoMessage()
func (*WordInfo) ProtoReflect
func (x *WordInfo) ProtoReflect() protoreflect.Message
func (*WordInfo) Reset
func (x *WordInfo) Reset()