Annotate a video file stored in Cloud Storage.
Code sample
Go
import (
	"context"
	"fmt"
	"io"

	video "cloud.google.com/go/videointelligence/apiv1"
	"github.com/golang/protobuf/ptypes"
	videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1"
)

// labelURI annotates a video stored in Cloud Storage (for example,
// "gs://bucket-name/file") and writes the detected labels to w.
func labelURI(w io.Writer, file string) error {
	ctx := context.Background()
	client, err := video.NewClient(ctx)
	if err != nil {
		return fmt.Errorf("video.NewClient: %v", err)
	}
	defer client.Close()

	op, err := client.AnnotateVideo(ctx, &videopb.AnnotateVideoRequest{
		Features: []videopb.Feature{
			videopb.Feature_LABEL_DETECTION,
		},
		InputUri: file,
	})
	if err != nil {
		return fmt.Errorf("AnnotateVideo: %v", err)
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		return fmt.Errorf("Wait: %v", err)
	}

	printLabels := func(labels []*videopb.LabelAnnotation) {
		for _, label := range labels {
			fmt.Fprintf(w, "\tDescription: %s\n", label.Entity.Description)
			for _, category := range label.CategoryEntities {
				fmt.Fprintf(w, "\t\tCategory: %s\n", category.Description)
			}
			for _, segment := range label.Segments {
				start, _ := ptypes.Duration(segment.Segment.StartTimeOffset)
				end, _ := ptypes.Duration(segment.Segment.EndTimeOffset)
				fmt.Fprintf(w, "\t\tSegment: %s to %s\n", start, end)
			}
		}
	}

	// A single video was processed. Get the first result.
	result := resp.AnnotationResults[0]

	fmt.Fprintln(w, "SegmentLabelAnnotations:")
	printLabels(result.SegmentLabelAnnotations)
	fmt.Fprintln(w, "ShotLabelAnnotations:")
	printLabels(result.ShotLabelAnnotations)
	fmt.Fprintln(w, "FrameLabelAnnotations:")
	printLabels(result.FrameLabelAnnotations)
	return nil
}
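To try the function, you can call it from a small program. The following is a minimal sketch, assuming labelURI is compiled into the same main package; the gs:// URI is a placeholder you would replace with a video in a bucket you can access.

package main

import (
	"log"
	"os"
)

func main() {
	// Placeholder URI: replace with a video you have access to.
	const uri = "gs://YOUR_BUCKET/YOUR_VIDEO.mp4"
	if err := labelURI(os.Stdout, uri); err != nil {
		log.Fatal(err)
	}
}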
Java
// Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
  // Provide path to file hosted on GCS as "gs://bucket-name/..."
  AnnotateVideoRequest request =
      AnnotateVideoRequest.newBuilder()
          .setInputUri(gcsUri)
          .addFeatures(Feature.LABEL_DETECTION)
          .build();
  // Create an operation that will contain the response when the operation completes.
  OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
      client.annotateVideoAsync(request);

  System.out.println("Waiting for operation to complete...");
  for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
    // process video / segment level label annotations
    System.out.println("Locations: ");
    for (LabelAnnotation labelAnnotation : results.getSegmentLabelAnnotationsList()) {
      System.out.println("Video label: " + labelAnnotation.getEntity().getDescription());
      // categories
      for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
        System.out.println("Video label category: " + categoryEntity.getDescription());
      }
      // segments
      for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
        double startTime =
            segment.getSegment().getStartTimeOffset().getSeconds()
                + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
        double endTime =
            segment.getSegment().getEndTimeOffset().getSeconds()
                + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
        System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
        System.out.println("Confidence: " + segment.getConfidence());
      }
    }

    // process shot label annotations
    for (LabelAnnotation labelAnnotation : results.getShotLabelAnnotationsList()) {
      System.out.println("Shot label: " + labelAnnotation.getEntity().getDescription());
      // categories
      for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
        System.out.println("Shot label category: " + categoryEntity.getDescription());
      }
      // segments
      for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
        double startTime =
            segment.getSegment().getStartTimeOffset().getSeconds()
                + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
        double endTime =
            segment.getSegment().getEndTimeOffset().getSeconds()
                + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
        System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
        System.out.println("Confidence: " + segment.getConfidence());
      }
    }

    // process frame label annotations
    for (LabelAnnotation labelAnnotation : results.getFrameLabelAnnotationsList()) {
      System.out.println("Frame label: " + labelAnnotation.getEntity().getDescription());
      // categories
      for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
        System.out.println("Frame label category: " + categoryEntity.getDescription());
      }
      // segments
      for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
        double startTime =
            segment.getSegment().getStartTimeOffset().getSeconds()
                + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
        double endTime =
            segment.getSegment().getEndTimeOffset().getSeconds()
                + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
        System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
        System.out.println("Confidence: " + segment.getConfidence());
      }
    }
  }
}
Node.js
// Imports the Google Cloud Video Intelligence library
const video = require('@google-cloud/video-intelligence').v1;

// Creates a client
const client = new video.VideoIntelligenceServiceClient();

/**
 * TODO(developer): Uncomment the following line before running the sample.
 */
// const gcsUri = 'GCS URI of the video to analyze, e.g. gs://my-bucket/my-video.mp4';

const request = {
  inputUri: gcsUri,
  features: ['LABEL_DETECTION'],
};

// Detects labels in a video
const [operation] = await client.annotateVideo(request);
console.log('Waiting for operation to complete...');
const [operationResult] = await operation.promise();

// Gets annotations for video
const annotations = operationResult.annotationResults[0];

const labels = annotations.segmentLabelAnnotations;
labels.forEach(label => {
  console.log(`Label ${label.entity.description} occurs at:`);
  label.segments.forEach(segment => {
    const time = segment.segment;
    if (time.startTimeOffset.seconds === undefined) {
      time.startTimeOffset.seconds = 0;
    }
    if (time.startTimeOffset.nanos === undefined) {
      time.startTimeOffset.nanos = 0;
    }
    if (time.endTimeOffset.seconds === undefined) {
      time.endTimeOffset.seconds = 0;
    }
    if (time.endTimeOffset.nanos === undefined) {
      time.endTimeOffset.nanos = 0;
    }
    console.log(
      `\tStart: ${time.startTimeOffset.seconds}` +
        `.${(time.startTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
    console.log(
      `\tEnd: ${time.endTimeOffset.seconds}.` +
        `${(time.endTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
    console.log(`\tConfidence: ${segment.confidence}`);
  });
});
PHP
use Google\Cloud\VideoIntelligence\V1\VideoIntelligenceServiceClient;
use Google\Cloud\VideoIntelligence\V1\Feature;

/** Uncomment and populate these variables in your code */
// $uri = 'The cloud storage object to analyze (gs://your-bucket-name/your-object-name)';
// $options = [];

# Instantiate a client.
$video = new VideoIntelligenceServiceClient();

# Execute a request.
$features = [Feature::LABEL_DETECTION];
$operation = $video->annotateVideo([
    'inputUri' => $uri,
    'features' => $features,
]);

# Wait for the request to complete.
$operation->pollUntilComplete($options);

# Print the results.
if ($operation->operationSucceeded()) {
    $results = $operation->getResult()->getAnnotationResults()[0];

    # Process video/segment level label annotations
    foreach ($results->getSegmentLabelAnnotations() as $label) {
        printf('Video label description: %s' . PHP_EOL, $label->getEntity()->getDescription());
        foreach ($label->getCategoryEntities() as $categoryEntity) {
            printf('  Category: %s' . PHP_EOL, $categoryEntity->getDescription());
        }
        foreach ($label->getSegments() as $segment) {
            $start = $segment->getSegment()->getStartTimeOffset();
            $end = $segment->getSegment()->getEndTimeOffset();
            printf('  Segment: %ss to %ss' . PHP_EOL,
                $start->getSeconds() + $start->getNanos() / 1000000000.0,
                $end->getSeconds() + $end->getNanos() / 1000000000.0);
            printf('  Confidence: %f' . PHP_EOL, $segment->getConfidence());
        }
    }
    print(PHP_EOL);

    # Process shot level label annotations
    foreach ($results->getShotLabelAnnotations() as $label) {
        printf('Shot label description: %s' . PHP_EOL, $label->getEntity()->getDescription());
        foreach ($label->getCategoryEntities() as $categoryEntity) {
            printf('  Category: %s' . PHP_EOL, $categoryEntity->getDescription());
        }
        foreach ($label->getSegments() as $shot) {
            $start = $shot->getSegment()->getStartTimeOffset();
            $end = $shot->getSegment()->getEndTimeOffset();
            printf('  Shot: %ss to %ss' . PHP_EOL,
                $start->getSeconds() + $start->getNanos() / 1000000000.0,
                $end->getSeconds() + $end->getNanos() / 1000000000.0);
            printf('  Confidence: %f' . PHP_EOL, $shot->getConfidence());
        }
    }
    print(PHP_EOL);
} else {
    print_r($operation->getError());
}
Python
""" Detects labels given a GCS path. """
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.Feature.LABEL_DETECTION]
mode = videointelligence.LabelDetectionMode.SHOT_AND_FRAME_MODE
config = videointelligence.LabelDetectionConfig(label_detection_mode=mode)
context = videointelligence.VideoContext(label_detection_config=config)
operation = video_client.annotate_video(
request={"features": features, "input_uri": path, "video_context": context}
)
print("\nProcessing video for label annotations:")
result = operation.result(timeout=180)
print("\nFinished processing.")
# Process video/segment level label annotations
segment_labels = result.annotation_results[0].segment_label_annotations
for i, segment_label in enumerate(segment_labels):
print("Video label description: {}".format(segment_label.entity.description))
for category_entity in segment_label.category_entities:
print(
"\tLabel category description: {}".format(category_entity.description)
)
for i, segment in enumerate(segment_label.segments):
start_time = (
segment.segment.start_time_offset.seconds
+ segment.segment.start_time_offset.microseconds / 1e6
)
end_time = (
segment.segment.end_time_offset.seconds
+ segment.segment.end_time_offset.microseconds / 1e6
)
positions = "{}s to {}s".format(start_time, end_time)
confidence = segment.confidence
print("\tSegment {}: {}".format(i, positions))
print("\tConfidence: {}".format(confidence))
print("\n")
# Process shot level label annotations
shot_labels = result.annotation_results[0].shot_label_annotations
for i, shot_label in enumerate(shot_labels):
print("Shot label description: {}".format(shot_label.entity.description))
for category_entity in shot_label.category_entities:
print(
"\tLabel category description: {}".format(category_entity.description)
)
for i, shot in enumerate(shot_label.segments):
start_time = (
shot.segment.start_time_offset.seconds
+ shot.segment.start_time_offset.microseconds / 1e6
)
end_time = (
shot.segment.end_time_offset.seconds
+ shot.segment.end_time_offset.microseconds / 1e6
)
positions = "{}s to {}s".format(start_time, end_time)
confidence = shot.confidence
print("\tSegment {}: {}".format(i, positions))
print("\tConfidence: {}".format(confidence))
print("\n")
# Process frame level label annotations
frame_labels = result.annotation_results[0].frame_label_annotations
for i, frame_label in enumerate(frame_labels):
print("Frame label description: {}".format(frame_label.entity.description))
for category_entity in frame_label.category_entities:
print(
"\tLabel category description: {}".format(category_entity.description)
)
# Each frame_label_annotation has many frames,
# here we print information only about the first frame.
frame = frame_label.frames[0]
time_offset = frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
print("\tFirst frame time offset: {}s".format(time_offset))
print("\tFirst frame confidence: {}".format(frame.confidence))
print("\n")
What's next
To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.