Detect labels in video on Cloud Storage

Detect labels in a video stored in Cloud Storage.
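
All of the samples below construct the same asynchronous annotation request, wait for the long-running operation to finish, and then read the segment label annotations from the first result. For reference only, the equivalent call against the REST endpoint looks approximately like the following sketch. This is an illustration rather than one of this page's official samples, and it assumes that gcloud application-default credentials are already configured:

# Illustrative only: assumes gcloud application-default credentials are available.
curl -X POST \
  -H "Authorization: Bearer $(gcloud auth application-default print-access-token)" \
  -H "Content-Type: application/json; charset=utf-8" \
  -d '{
        "inputUri": "gs://cloud-samples-data/video/cat.mp4",
        "features": ["LABEL_DETECTION"]
      }' \
  "https://videointelligence.googleapis.com/v1/videos:annotate"

The response contains the name of a long-running operation; each client library below polls that operation and then prints the label descriptions, category entities, segment time offsets, and confidence scores.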

Code sample

C#


using Google.Cloud.VideoIntelligence.V1;
using System;

namespace GoogleCloudSamples.VideoIntelligence
{
    public class QuickStart
    {
        public static void Main(string[] args)
        {
            var client = VideoIntelligenceServiceClient.Create();
            var request = new AnnotateVideoRequest()
            {
                InputUri = @"gs://cloud-samples-data/video/cat.mp4",
                Features = { Feature.LabelDetection }
            };
            var op = client.AnnotateVideo(request).PollUntilCompleted();
            foreach (var result in op.Result.AnnotationResults)
            {
                foreach (var annotation in result.SegmentLabelAnnotations)
                {
                    Console.WriteLine($"Video label: {annotation.Entity.Description}");
                    foreach (var entity in annotation.CategoryEntities)
                    {
                        Console.WriteLine($"Video label category: {entity.Description}");
                    }
                    foreach (var segment in annotation.Segments)
                    {
                        Console.Write("Segment location: ");
                        Console.Write(segment.Segment.StartTimeOffset);
                        Console.Write(":");
                        Console.WriteLine(segment.Segment.EndTimeOffset);
                        System.Console.WriteLine($"Confidence: {segment.Confidence}");
                    }
                }
            }
        }
    }
}

Go


// Sample video_quickstart uses the Google Cloud Video Intelligence API to label a video.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"

	video "cloud.google.com/go/videointelligence/apiv1"
	videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1"
)

func main() {
	ctx := context.Background()

	// Creates a client.
	client, err := video.NewClient(ctx)
	if err != nil {
		log.Fatalf("Failed to create client: %v", err)
	}

	op, err := client.AnnotateVideo(ctx, &videopb.AnnotateVideoRequest{
		InputUri: "gs://cloud-samples-data/video/cat.mp4",
		Features: []videopb.Feature{
			videopb.Feature_LABEL_DETECTION,
		},
	})
	if err != nil {
		log.Fatalf("Failed to start annotation job: %v", err)
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		log.Fatalf("Failed to annotate: %v", err)
	}

	// Only one video was processed, so get the first result.
	result := resp.GetAnnotationResults()[0]

	for _, annotation := range result.SegmentLabelAnnotations {
		fmt.Printf("Description: %s\n", annotation.Entity.Description)

		for _, category := range annotation.CategoryEntities {
			fmt.Printf("\tCategory: %s\n", category.Description)
		}

		for _, segment := range annotation.Segments {
			start, _ := ptypes.Duration(segment.Segment.StartTimeOffset)
			end, _ := ptypes.Duration(segment.Segment.EndTimeOffset)
			fmt.Printf("\tSegment: %s to %s\n", start, end)
			fmt.Printf("\tConfidence: %v\n", segment.Confidence)
		}
	}
}

Java


import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1.Entity;
import com.google.cloud.videointelligence.v1.Feature;
import com.google.cloud.videointelligence.v1.LabelAnnotation;
import com.google.cloud.videointelligence.v1.LabelSegment;
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
import java.util.List;

public class QuickstartSample {

  /** Demonstrates using the video intelligence client to detect labels in a video file. */
  public static void main(String[] args) throws Exception {
    // Instantiate a video intelligence client
    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
      // The Google Cloud Storage path to the video to annotate.
      String gcsUri = "gs://cloud-samples-data/video/cat.mp4";

      // Create an operation that will contain the response when the operation completes.
      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputUri(gcsUri)
              .addFeatures(Feature.LABEL_DETECTION)
              .build();

      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
          client.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");

      List<VideoAnnotationResults> results = response.get().getAnnotationResultsList();
      if (results.isEmpty()) {
        System.out.println("No labels detected in " + gcsUri);
        return;
      }
      for (VideoAnnotationResults result : results) {
        System.out.println("Labels:");
        // get video segment label annotations
        for (LabelAnnotation annotation : result.getSegmentLabelAnnotationsList()) {
          System.out.println(
              "Video label description : " + annotation.getEntity().getDescription());
          // categories
          for (Entity categoryEntity : annotation.getCategoryEntitiesList()) {
            System.out.println("Label Category description : " + categoryEntity.getDescription());
          }
          // segments
          for (LabelSegment segment : annotation.getSegmentsList()) {
            double startTime =
                segment.getSegment().getStartTimeOffset().getSeconds()
                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
            double endTime =
                segment.getSegment().getEndTimeOffset().getSeconds()
                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
            System.out.printf("Segment location : %.3f:%.3f\n", startTime, endTime);
            System.out.println("Confidence : " + segment.getConfidence());
          }
        }
      }
    }
  }
}

Node.js

// Imports the Google Cloud Video Intelligence library
const videoIntelligence = require('@google-cloud/video-intelligence');

// Creates a client
const client = new videoIntelligence.VideoIntelligenceServiceClient();

// The GCS uri of the video to analyze
const gcsUri = 'gs://cloud-samples-data/video/cat.mp4';

// Construct request
const request = {
  inputUri: gcsUri,
  features: ['LABEL_DETECTION'],
};

// Run the request inside an async function so that `await` can be used.
async function main() {
  // Execute request
  const [operation] = await client.annotateVideo(request);

  console.log(
    'Waiting for operation to complete... (this may take a few minutes)'
  );

  const [operationResult] = await operation.promise();

  // Gets annotations for video
  const annotations = operationResult.annotationResults[0];

  // Gets labels for video from its annotations
  const labels = annotations.segmentLabelAnnotations;
  labels.forEach(label => {
    console.log(`Label ${label.entity.description} occurs at:`);
    label.segments.forEach(segment => {
      segment = segment.segment;
      if (segment.startTimeOffset.seconds === undefined) {
        segment.startTimeOffset.seconds = 0;
      }
      if (segment.startTimeOffset.nanos === undefined) {
        segment.startTimeOffset.nanos = 0;
      }
      if (segment.endTimeOffset.seconds === undefined) {
        segment.endTimeOffset.seconds = 0;
      }
      if (segment.endTimeOffset.nanos === undefined) {
        segment.endTimeOffset.nanos = 0;
      }
      console.log(
        `\tStart: ${segment.startTimeOffset.seconds}` +
          `.${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s`
      );
      console.log(
        `\tEnd: ${segment.endTimeOffset.seconds}.` +
          `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s`
      );
    });
  });
}

main().catch(console.error);

PHP

use Google\Cloud\VideoIntelligence\V1\VideoIntelligenceServiceClient;
use Google\Cloud\VideoIntelligence\V1\Feature;

# Instantiate a client.
$video = new VideoIntelligenceServiceClient();

# Execute a request.
$options = [
    'inputUri' => 'gs://cloud-samples-data/video/cat.mp4',
    'features' => [Feature::LABEL_DETECTION]
];
$operation = $video->annotateVideo($options);

# Wait for the request to complete.
$operation->pollUntilComplete();

# Print the result.
if ($operation->operationSucceeded()) {
    $results = $operation->getResult()->getAnnotationResults()[0];
    # Process video/segment level label annotations
    foreach ($results->getSegmentLabelAnnotations() as $label) {
        printf('Video label description: %s' . PHP_EOL, $label->getEntity()->getDescription());
        foreach ($label->getCategoryEntities() as $categoryEntity) {
            printf('  Category: %s' . PHP_EOL, $categoryEntity->getDescription());
        }
        foreach ($label->getSegments() as $segment) {
            $start = $segment->getSegment()->getStartTimeOffset();
            $end = $segment->getSegment()->getEndTimeOffset();
            printf('  Segment: %ss to %ss' . PHP_EOL,
                $start->getSeconds() + $start->getNanos()/1000000000.0,
                $end->getSeconds() + $end->getNanos()/1000000000.0
            );
            printf('  Confidence: %f' . PHP_EOL, $segment->getConfidence());
        }
    }
} else {
    print_r($operation->getError());
}

Python

from google.cloud import videointelligence

video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.Feature.LABEL_DETECTION]
operation = video_client.annotate_video(
    request={
        "features": features,
        "input_uri": "gs://cloud-samples-data/video/cat.mp4",
    }
)
print("\nProcessing video for label annotations:")

result = operation.result(timeout=120)
print("\nFinished processing.")

# first result is retrieved because a single video was processed
segment_labels = result.annotation_results[0].segment_label_annotations
for segment_label in segment_labels:
    print("Video label description: {}".format(segment_label.entity.description))
    for category_entity in segment_label.category_entities:
        print(
            "\tLabel category description: {}".format(category_entity.description)
        )

    for i, segment in enumerate(segment_label.segments):
        start_time = (
            segment.segment.start_time_offset.seconds
            + segment.segment.start_time_offset.microseconds / 1e6
        )
        end_time = (
            segment.segment.end_time_offset.seconds
            + segment.segment.end_time_offset.microseconds / 1e6
        )
        positions = "{}s to {}s".format(start_time, end_time)
        confidence = segment.confidence
        print("\tSegment {}: {}".format(i, positions))
        print("\tConfidence: {}".format(confidence))
    print("\n")

Ruby

require "google/cloud/video_intelligence"

video_client = Google::Cloud::VideoIntelligence.video_intelligence_service
features     = [:LABEL_DETECTION]
path         = "gs://cloud-samples-data/video/cat.mp4"

# Register a callback during the method call
operation = video_client.annotate_video features: features, input_uri: path

puts "Processing video for label annotations:"
operation.wait_until_done!

raise operation.error.message if operation.error?
puts "Finished Processing."

labels = operation.results.annotation_results.first.segment_label_annotations

labels.each do |label|
  puts "Label description: #{label.entity.description}"

  label.category_entities.each do |category_entity|
    puts "Label category description: #{category_entity.description}"
  end

  label.segments.each do |segment|
    start_time = (segment.segment.start_time_offset.seconds +
                   segment.segment.start_time_offset.nanos / 1e9)
    end_time =   (segment.segment.end_time_offset.seconds +
                   segment.segment.end_time_offset.nanos / 1e9)

    puts "Segment: #{start_time} to #{end_time}"
    puts "Confidence: #{segment.confidence}"
  end
end