Analyze videos for labels

The Video Intelligence API can identify entities shown in video footage using the LABEL_DETECTION feature. This feature identifies general objects, locations, activities, animal species, products, and more.

The analysis can be broken down as follows:

  • Frame level:
    Entities are identified and labeled within each frame (with one-frame-per-second sampling).
  • Shot level:
    Shots are automatically detected within every segment (or video). Entities are then identified and labeled within each shot.
  • Segment level:
    User-selected segments of a video can be specified for analysis by stipulating beginning and ending timestamps for annotation (see VideoSegment); a sketch of segment selection follows this list. Entities are then identified and labeled within each segment. If no segments are specified, the whole video is treated as one segment.
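
For example, to restrict label detection to a user-selected time range, you can pass one or more VideoSegment objects in the request's video context. Here is a minimal Python sketch, assuming the same client-library version as the Python samples later on this page; the URI and the 30-second window are placeholders:

from google.cloud import videointelligence

video_client = videointelligence.VideoIntelligenceServiceClient()

# Annotate only the first 30 seconds of the video (placeholder values).
segment = videointelligence.types.VideoSegment()
segment.start_time_offset.seconds = 0
segment.end_time_offset.seconds = 30
context = videointelligence.types.VideoContext(segments=[segment])

operation = video_client.annotate_video(
    input_uri="gs://my-bucket/my-video.mp4",
    features=[videointelligence.enums.Feature.LABEL_DETECTION],
    video_context=context,
)
result = operation.result(timeout=180)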

Annotating a local file

Here is an example of performing video label analysis on a local file.

Looking for more in-depth information? Check out our detailed Python tutorial.

REST & CMD LINE

Send the process request

The following shows how to send a POST request to the videos:annotate method. You can configure the LabelDetectionMode for shot-level and/or frame-level annotations; we recommend using SHOT_AND_FRAME_MODE. The example uses the access token for a service account set up for the project using the Cloud SDK. For instructions on installing the Cloud SDK, setting up a project with a service account, and obtaining an access token, see the Video Intelligence quickstart.

Before using any of the request data below, make the following replacements:

  • base64-encoded-content: the video data to be annotated, encoded in base64.

HTTP method and URL:

POST https://videointelligence.googleapis.com/v1/videos:annotate

Request JSON body:

{
  "inputContent": "base64-encoded-content",
  "features": ["LABEL_DETECTION"]
}
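
To set the recommended SHOT_AND_FRAME_MODE explicitly, the request body can also carry a videoContext with a labelDetectionConfig, along these lines:

{
  "inputContent": "base64-encoded-content",
  "features": ["LABEL_DETECTION"],
  "videoContext": {
    "labelDetectionConfig": {
      "labelDetectionMode": "SHOT_AND_FRAME_MODE"
    }
  }
}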

To send your request, you can use curl from the command line; for example (this assumes the request body above is saved as request.json):
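
curl -X POST \
     -H "Authorization: Bearer $(gcloud auth application-default print-access-token)" \
     -H "Content-Type: application/json; charset=utf-8" \
     -d @request.json \
     https://videointelligence.googleapis.com/v1/videos:annotate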

You should receive a JSON response similar to the following:

{
  "name": "projects/project-number/locations/location-id/operations/operation-id"
}

If the request is successful, Video Intelligence returns the name of your operation.

Get the results

To get the results of your request, you must send a GET request to the projects.locations.operations resource. The following shows how to send such a request.

Before using any of the request data below, make the following replacements:

  • operation-name: the name of the operation as returned by the Video Intelligence API. The operation name has the format projects/project-number/locations/location-id/operations/operation-id.

HTTP method and URL:

GET https://videointelligence.googleapis.com/v1/operation-name

To send your request, you can again use curl; for example:
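
curl -X GET \
     -H "Authorization: Bearer $(gcloud auth application-default print-access-token)" \
     https://videointelligence.googleapis.com/v1/operation-name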

You should receive a JSON response with the status of the operation and, once processing has finished, the label annotations for the video.

C#

public static object AnalyzeLabels(string path)
{
    var client = VideoIntelligenceServiceClient.Create();
    var request = new AnnotateVideoRequest()
    {
        InputContent = Google.Protobuf.ByteString.CopyFrom(File.ReadAllBytes(path)),
        Features = { Feature.LabelDetection },
    };
    var op = client.AnnotateVideo(request).PollUntilCompleted();
    foreach (var result in op.Result.AnnotationResults)
    {
        PrintLabels("Video", result.SegmentLabelAnnotations);
        PrintLabels("Shot", result.ShotLabelAnnotations);
        PrintLabels("Frame", result.FrameLabelAnnotations);
    }
    return 0;
}

static void PrintLabels(string labelName,
    IEnumerable<LabelAnnotation> labelAnnotations)
{
    foreach (var annotation in labelAnnotations)
    {
        Console.WriteLine($"{labelName} label: {annotation.Entity.Description}");
        foreach (var entity in annotation.CategoryEntities)
        {
            Console.WriteLine($"{labelName} label category: {entity.Description}");
        }
        foreach (var segment in annotation.Segments)
        {
            Console.Write("Segment location: ");
            Console.Write(segment.Segment.StartTimeOffset);
            Console.Write(":");
            Console.WriteLine(segment.Segment.EndTimeOffset);
            Console.WriteLine($"Confidence: {segment.Confidence}");
        }
    }
}

Go


import (
	"context"
	"fmt"
	"io"
	"io/ioutil"

	video "cloud.google.com/go/videointelligence/apiv1"
	"github.com/golang/protobuf/ptypes"
	videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1"
)

func label(w io.Writer, file string) error {
	ctx := context.Background()
	client, err := video.NewClient(ctx)
	if err != nil {
		return fmt.Errorf("video.NewClient: %v", err)
	}

	fileBytes, err := ioutil.ReadFile(file)
	if err != nil {
		return err
	}

	op, err := client.AnnotateVideo(ctx, &videopb.AnnotateVideoRequest{
		Features: []videopb.Feature{
			videopb.Feature_LABEL_DETECTION,
		},
		InputContent: fileBytes,
	})
	if err != nil {
		return fmt.Errorf("AnnotateVideo: %v", err)
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		return fmt.Errorf("Wait: %v", err)
	}

	printLabels := func(labels []*videopb.LabelAnnotation) {
		for _, label := range labels {
			fmt.Fprintf(w, "\tDescription: %s\n", label.Entity.Description)
			for _, category := range label.CategoryEntities {
				fmt.Fprintf(w, "\t\tCategory: %s\n", category.Description)
			}
			for _, segment := range label.Segments {
				start, _ := ptypes.Duration(segment.Segment.StartTimeOffset)
				end, _ := ptypes.Duration(segment.Segment.EndTimeOffset)
				fmt.Fprintf(w, "\t\tSegment: %s to %s\n", start, end)
			}
		}
	}

	// A single video was processed. Get the first result.
	result := resp.AnnotationResults[0]

	fmt.Fprintln(w, "SegmentLabelAnnotations:")
	printLabels(result.SegmentLabelAnnotations)
	fmt.Fprintln(w, "ShotLabelAnnotations:")
	printLabels(result.ShotLabelAnnotations)
	fmt.Fprintln(w, "FrameLabelAnnotations:")
	printLabels(result.FrameLabelAnnotations)

	return nil
}

Java

// Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
  // Read file and encode into Base64
  Path path = Paths.get(filePath);
  byte[] data = Files.readAllBytes(path);

  AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
      .setInputContent(ByteString.copyFrom(data))
      .addFeatures(Feature.LABEL_DETECTION)
      .build();
  // Create an operation that will contain the response when the operation completes.
  OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
      client.annotateVideoAsync(request);

  System.out.println("Waiting for operation to complete...");
  for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
    // process video / segment level label annotations
    System.out.println("Locations: ");
    for (LabelAnnotation labelAnnotation : results.getSegmentLabelAnnotationsList()) {
      System.out
          .println("Video label: " + labelAnnotation.getEntity().getDescription());
      // categories
      for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
        System.out.println("Video label category: " + categoryEntity.getDescription());
      }
      // segments
      for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
        double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
            + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
        double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
            + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
        System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
        System.out.println("Confidence: " + segment.getConfidence());
      }
    }

    // process shot label annotations
    for (LabelAnnotation labelAnnotation : results.getShotLabelAnnotationsList()) {
      System.out
          .println("Shot label: " + labelAnnotation.getEntity().getDescription());
      // categories
      for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
        System.out.println("Shot label category: " + categoryEntity.getDescription());
      }
      // segments
      for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
        double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
            + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
        double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
            + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
        System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
        System.out.println("Confidence: " + segment.getConfidence());
      }
    }

    // process frame label annotations
    for (LabelAnnotation labelAnnotation : results.getFrameLabelAnnotationsList()) {
      System.out
          .println("Frame label: " + labelAnnotation.getEntity().getDescription());
      // categories
      for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
        System.out.println("Frame label category: " + categoryEntity.getDescription());
      }
      // segments
      for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
        double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
            + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
        double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
            + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
        System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
        System.out.println("Confidence: " + segment.getConfidence());
      }
    }
  }
}

Node.js

// Imports the Google Cloud Video Intelligence library + Node's fs library
const video = require('@google-cloud/video-intelligence').v1;
const fs = require('fs');
const util = require('util');

// Creates a client
const client = new video.VideoIntelligenceServiceClient();

/**
 * TODO(developer): Uncomment the following line before running the sample.
 */
// const path = 'Local file to analyze, e.g. ./my-file.mp4';

// Reads a local video file and converts it to base64
const readFile = util.promisify(fs.readFile);
const file = await readFile(path);
const inputContent = file.toString('base64');

// Constructs request
const request = {
  inputContent: inputContent,
  features: ['LABEL_DETECTION'],
};

// Detects labels in a video
const [operation] = await client.annotateVideo(request);
console.log('Waiting for operation to complete...');
const [operationResult] = await operation.promise();
// Gets annotations for video
const annotations = operationResult.annotationResults[0];

const labels = annotations.segmentLabelAnnotations;
labels.forEach(label => {
  console.log(`Label ${label.entity.description} occurs at:`);
  label.segments.forEach(segment => {
    const time = segment.segment;
    if (time.startTimeOffset.seconds === undefined) {
      time.startTimeOffset.seconds = 0;
    }
    if (time.startTimeOffset.nanos === undefined) {
      time.startTimeOffset.nanos = 0;
    }
    if (time.endTimeOffset.seconds === undefined) {
      time.endTimeOffset.seconds = 0;
    }
    if (time.endTimeOffset.nanos === undefined) {
      time.endTimeOffset.nanos = 0;
    }
    console.log(
      `\tStart: ${time.startTimeOffset.seconds}` +
        `.${(time.startTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
    console.log(
      `\tEnd: ${time.endTimeOffset.seconds}.` +
        `${(time.endTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
    console.log(`\tConfidence: ${segment.confidence}`);
  });
});

Python

For more information on installing and using the Cloud Video Intelligence API client library for Python, see the Cloud Video Intelligence API client libraries page.

import io

from google.cloud import videointelligence

# Detect labels, given a local file path.
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.LABEL_DETECTION]

with io.open(path, "rb") as movie:
    input_content = movie.read()

operation = video_client.annotate_video(
    features=features, input_content=input_content
)
print("\nProcessing video for label annotations:")

result = operation.result(timeout=90)
print("\nFinished processing.")

# Process video/segment level label annotations
segment_labels = result.annotation_results[0].segment_label_annotations
for i, segment_label in enumerate(segment_labels):
    print("Video label description: {}".format(segment_label.entity.description))
    for category_entity in segment_label.category_entities:
        print(
            "\tLabel category description: {}".format(category_entity.description)
        )

    for i, segment in enumerate(segment_label.segments):
        start_time = (
            segment.segment.start_time_offset.seconds
            + segment.segment.start_time_offset.nanos / 1e9
        )
        end_time = (
            segment.segment.end_time_offset.seconds
            + segment.segment.end_time_offset.nanos / 1e9
        )
        positions = "{}s to {}s".format(start_time, end_time)
        confidence = segment.confidence
        print("\tSegment {}: {}".format(i, positions))
        print("\tConfidence: {}".format(confidence))
    print("\n")

# Process shot level label annotations
shot_labels = result.annotation_results[0].shot_label_annotations
for i, shot_label in enumerate(shot_labels):
    print("Shot label description: {}".format(shot_label.entity.description))
    for category_entity in shot_label.category_entities:
        print(
            "\tLabel category description: {}".format(category_entity.description)
        )

    for i, shot in enumerate(shot_label.segments):
        start_time = (
            shot.segment.start_time_offset.seconds
            + shot.segment.start_time_offset.nanos / 1e9
        )
        end_time = (
            shot.segment.end_time_offset.seconds
            + shot.segment.end_time_offset.nanos / 1e9
        )
        positions = "{}s to {}s".format(start_time, end_time)
        confidence = shot.confidence
        print("\tSegment {}: {}".format(i, positions))
        print("\tConfidence: {}".format(confidence))
    print("\n")

# Process frame level label annotations
frame_labels = result.annotation_results[0].frame_label_annotations
for i, frame_label in enumerate(frame_labels):
    print("Frame label description: {}".format(frame_label.entity.description))
    for category_entity in frame_label.category_entities:
        print(
            "\tLabel category description: {}".format(category_entity.description)
        )

    # Each frame_label_annotation has many frames,
    # here we print information only about the first frame.
    frame = frame_label.frames[0]
    time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
    print("\tFirst frame time offset: {}s".format(time_offset))
    print("\tFirst frame confidence: {}".format(frame.confidence))
    print("\n")

PHP

use Google\Cloud\VideoIntelligence\V1\VideoIntelligenceServiceClient;
use Google\Cloud\VideoIntelligence\V1\Feature;

/** Uncomment and populate these variables in your code */
// $path = 'File path to a video file to analyze';
// $options = [];

# Instantiate a client.
$video = new VideoIntelligenceServiceClient();

# Read the local video file
$inputContent = file_get_contents($path);

# Execute a request.
$operation = $video->annotateVideo([
    'inputContent' => $inputContent,
    'features' => [Feature::LABEL_DETECTION]
]);

# Wait for the request to complete.
$operation->pollUntilComplete($options);

# Print the results.
if ($operation->operationSucceeded()) {
    $results = $operation->getResult()->getAnnotationResults()[0];

    # Process video/segment level label annotations
    foreach ($results->getSegmentLabelAnnotations() as $label) {
        printf('Video label description: %s' . PHP_EOL, $label->getEntity()->getDescription());
        foreach ($label->getCategoryEntities() as $categoryEntity) {
            printf('  Category: %s' . PHP_EOL, $categoryEntity->getDescription());
        }
        foreach ($label->getSegments() as $segment) {
            $start = $segment->getSegment()->getStartTimeOffset();
            $end = $segment->getSegment()->getEndTimeOffset();
            printf('  Segment: %ss to %ss' . PHP_EOL,
                $start->getSeconds() + $start->getNanos()/1000000000.0,
                $end->getSeconds() + $end->getNanos()/1000000000.0);
            printf('  Confidence: %f' . PHP_EOL, $segment->getConfidence());
        }
    }
    print(PHP_EOL);

    # Process shot level label annotations
    foreach ($results->getShotLabelAnnotations() as $label) {
        printf('Shot label description: %s' . PHP_EOL, $label->getEntity()->getDescription());
        foreach ($label->getCategoryEntities() as $categoryEntity) {
            printf('  Category: %s' . PHP_EOL, $categoryEntity->getDescription());
        }
        foreach ($label->getSegments() as $shot) {
            $start = $shot->getSegment()->getStartTimeOffset();
            $end = $shot->getSegment()->getEndTimeOffset();
            printf('  Shot: %ss to %ss' . PHP_EOL,
                $start->getSeconds() + $start->getNanos()/1000000000.0,
                $end->getSeconds() + $end->getNanos()/1000000000.0);
            printf('  Confidence: %f' . PHP_EOL, $shot->getConfidence());
        }
    }
    print(PHP_EOL);
} else {
    print_r($operation->getError());
}

Ruby

# path = "Path to a local video file: path/to/file.mp4"

require "google/cloud/video_intelligence"

video = Google::Cloud::VideoIntelligence.video_intelligence_service

video_contents = File.binread path

# Start a long-running operation to annotate the video
operation = video.annotate_video features: [:LABEL_DETECTION], input_content: video_contents

puts "Processing video for label annotations:"
operation.wait_until_done!

raise operation.error.message if operation.error?
puts "Finished Processing."

labels = operation.results.annotation_results.first.segment_label_annotations

# Print each label with its segments and confidences
# (inlined here so the sample is self-contained).
labels.each do |label|
  puts "Label description: #{label.entity.description}"
  label.segments.each do |segment|
    start_time = segment.segment.start_time_offset.seconds +
                 segment.segment.start_time_offset.nanos / 1e9
    end_time = segment.segment.end_time_offset.seconds +
               segment.segment.end_time_offset.nanos / 1e9
    puts "  Segment: #{start_time}s to #{end_time}s"
    puts "  Confidence: #{segment.confidence}"
  end
end

Annotating a file on Cloud Storage

Here is an example of performing video label analysis on a file located in Cloud Storage.

REST & CMD LINE

Send the process request

The following shows how to send a POST request to the annotate method. The example uses the access token for a service account set up for the project using the Cloud SDK. For instructions on installing the Cloud SDK, setting up a project with a service account, and obtaining an access token, see the Video Intelligence quickstart.

Before using any of the request data below, make the following replacements:

  • input-uri: a Cloud Storage bucket containing the file you want to annotate, including the file name. Must start with gs://.

HTTP method and URL:

POST https://videointelligence.googleapis.com/v1/videos:annotate

Request JSON body:

{
  "inputUri": "input-uri",
  "features": ["LABEL_DETECTION"]
}

To send your request, you can use curl from the command line; for example (again assuming the request body above is saved as request.json):
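
curl -X POST \
     -H "Authorization: Bearer $(gcloud auth application-default print-access-token)" \
     -H "Content-Type: application/json; charset=utf-8" \
     -d @request.json \
     https://videointelligence.googleapis.com/v1/videos:annotate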

You should receive a JSON response similar to the following:

{
  "name": "projects/project-number/locations/location-id/operations/operation-id"
}

If the request is successful, Video Intelligence returns the name of your operation.

Get the results

To get the results of your request, you must send a GET request to the projects.locations.operations resource. The following shows how to send such a request.

Before using any of the request data below, make the following replacements:

  • operation-name: the name of the operation as returned by the Video Intelligence API. The operation name has the format projects/project-number/locations/location-id/operations/operation-id.

HTTP method and URL:

GET https://videointelligence.googleapis.com/v1/operation-name

To send your request, you can again use curl; for example:
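
curl -X GET \
     -H "Authorization: Bearer $(gcloud auth application-default print-access-token)" \
     https://videointelligence.googleapis.com/v1/operation-name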

You should receive a JSON response with the status of the operation and, once processing has finished, the label annotations for the video.

C#

public static object AnalyzeLabelsGcs(string uri)
{
    var client = VideoIntelligenceServiceClient.Create();
    var request = new AnnotateVideoRequest()
    {
        InputUri = uri,
        Features = { Feature.LabelDetection }
    };
    var op = client.AnnotateVideo(request).PollUntilCompleted();
    foreach (var result in op.Result.AnnotationResults)
    {
        PrintLabels("Video", result.SegmentLabelAnnotations);
        PrintLabels("Shot", result.ShotLabelAnnotations);
        PrintLabels("Frame", result.FrameLabelAnnotations);
    }
    return 0;
}

static void PrintLabels(string labelName,
    IEnumerable<LabelAnnotation> labelAnnotations)
{
    foreach (var annotation in labelAnnotations)
    {
        Console.WriteLine($"{labelName} label: {annotation.Entity.Description}");
        foreach (var entity in annotation.CategoryEntities)
        {
            Console.WriteLine($"{labelName} label category: {entity.Description}");
        }
        foreach (var segment in annotation.Segments)
        {
            Console.Write("Segment location: ");
            Console.Write(segment.Segment.StartTimeOffset);
            Console.Write(":");
            Console.WriteLine(segment.Segment.EndTimeOffset);
            Console.WriteLine($"Confidence: {segment.Confidence}");
        }
    }
}

Go


import (
	"context"
	"fmt"
	"io"

	video "cloud.google.com/go/videointelligence/apiv1"
	"github.com/golang/protobuf/ptypes"
	videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1"
)

func labelURI(w io.Writer, file string) error {
	ctx := context.Background()
	client, err := video.NewClient(ctx)
	if err != nil {
		return fmt.Errorf("video.NewClient: %v", err)
	}

	op, err := client.AnnotateVideo(ctx, &videopb.AnnotateVideoRequest{
		Features: []videopb.Feature{
			videopb.Feature_LABEL_DETECTION,
		},
		InputUri: file,
	})
	if err != nil {
		return fmt.Errorf("AnnotateVideo: %v", err)
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		return fmt.Errorf("Wait: %v", err)
	}

	printLabels := func(labels []*videopb.LabelAnnotation) {
		for _, label := range labels {
			fmt.Fprintf(w, "\tDescription: %s\n", label.Entity.Description)
			for _, category := range label.CategoryEntities {
				fmt.Fprintf(w, "\t\tCategory: %s\n", category.Description)
			}
			for _, segment := range label.Segments {
				start, _ := ptypes.Duration(segment.Segment.StartTimeOffset)
				end, _ := ptypes.Duration(segment.Segment.EndTimeOffset)
				fmt.Fprintf(w, "\t\tSegment: %s to %s\n", start, end)
			}
		}
	}

	// A single video was processed. Get the first result.
	result := resp.AnnotationResults[0]

	fmt.Fprintln(w, "SegmentLabelAnnotations:")
	printLabels(result.SegmentLabelAnnotations)
	fmt.Fprintln(w, "ShotLabelAnnotations:")
	printLabels(result.ShotLabelAnnotations)
	fmt.Fprintln(w, "FrameLabelAnnotations:")
	printLabels(result.FrameLabelAnnotations)

	return nil
}

Java

// Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
  // Provide path to file hosted on GCS as "gs://bucket-name/..."
  AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
      .setInputUri(gcsUri)
      .addFeatures(Feature.LABEL_DETECTION)
      .build();
  // Create an operation that will contain the response when the operation completes.
  OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
      client.annotateVideoAsync(request);

  System.out.println("Waiting for operation to complete...");
  for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
    // process video / segment level label annotations
    System.out.println("Locations: ");
    for (LabelAnnotation labelAnnotation : results.getSegmentLabelAnnotationsList()) {
      System.out
          .println("Video label: " + labelAnnotation.getEntity().getDescription());
      // categories
      for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
        System.out.println("Video label category: " + categoryEntity.getDescription());
      }
      // segments
      for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
        double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
            + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
        double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
            + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
        System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
        System.out.println("Confidence: " + segment.getConfidence());
      }
    }

    // process shot label annotations
    for (LabelAnnotation labelAnnotation : results.getShotLabelAnnotationsList()) {
      System.out
          .println("Shot label: " + labelAnnotation.getEntity().getDescription());
      // categories
      for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
        System.out.println("Shot label category: " + categoryEntity.getDescription());
      }
      // segments
      for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
        double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
            + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
        double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
            + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
        System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
        System.out.println("Confidence: " + segment.getConfidence());
      }
    }

    // process frame label annotations
    for (LabelAnnotation labelAnnotation : results.getFrameLabelAnnotationsList()) {
      System.out
          .println("Frame label: " + labelAnnotation.getEntity().getDescription());
      // categories
      for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
        System.out.println("Frame label category: " + categoryEntity.getDescription());
      }
      // segments
      for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
        double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
            + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
        double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
            + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
        System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
        System.out.println("Confidence: " + segment.getConfidence());
      }
    }
  }
}

Node.js

// Imports the Google Cloud Video Intelligence library
const video = require('@google-cloud/video-intelligence').v1;

// Creates a client
const client = new video.VideoIntelligenceServiceClient();

/**
 * TODO(developer): Uncomment the following line before running the sample.
 */
// const gcsUri = 'GCS URI of the video to analyze, e.g. gs://my-bucket/my-video.mp4';

const request = {
  inputUri: gcsUri,
  features: ['LABEL_DETECTION'],
};

// Detects labels in a video
const [operation] = await client.annotateVideo(request);
console.log('Waiting for operation to complete...');
const [operationResult] = await operation.promise();

// Gets annotations for video
const annotations = operationResult.annotationResults[0];

const labels = annotations.segmentLabelAnnotations;
labels.forEach(label => {
  console.log(`Label ${label.entity.description} occurs at:`);
  label.segments.forEach(segment => {
    const time = segment.segment;
    if (time.startTimeOffset.seconds === undefined) {
      time.startTimeOffset.seconds = 0;
    }
    if (time.startTimeOffset.nanos === undefined) {
      time.startTimeOffset.nanos = 0;
    }
    if (time.endTimeOffset.seconds === undefined) {
      time.endTimeOffset.seconds = 0;
    }
    if (time.endTimeOffset.nanos === undefined) {
      time.endTimeOffset.nanos = 0;
    }
    console.log(
      `\tStart: ${time.startTimeOffset.seconds}` +
        `.${(time.startTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
    console.log(
      `\tEnd: ${time.endTimeOffset.seconds}.` +
        `${(time.endTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
    console.log(`\tConfidence: ${segment.confidence}`);
  });
});

Python

""" Detects labels given a GCS path. """
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.LABEL_DETECTION]

mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE
config = videointelligence.types.LabelDetectionConfig(label_detection_mode=mode)
context = videointelligence.types.VideoContext(label_detection_config=config)

operation = video_client.annotate_video(
    input_uri=path, features=features, video_context=context
)
print("\nProcessing video for label annotations:")

result = operation.result(timeout=180)
print("\nFinished processing.")

# Process video/segment level label annotations
segment_labels = result.annotation_results[0].segment_label_annotations
for i, segment_label in enumerate(segment_labels):
    print("Video label description: {}".format(segment_label.entity.description))
    for category_entity in segment_label.category_entities:
        print(
            "\tLabel category description: {}".format(category_entity.description)
        )

    for i, segment in enumerate(segment_label.segments):
        start_time = (
            segment.segment.start_time_offset.seconds
            + segment.segment.start_time_offset.nanos / 1e9
        )
        end_time = (
            segment.segment.end_time_offset.seconds
            + segment.segment.end_time_offset.nanos / 1e9
        )
        positions = "{}s to {}s".format(start_time, end_time)
        confidence = segment.confidence
        print("\tSegment {}: {}".format(i, positions))
        print("\tConfidence: {}".format(confidence))
    print("\n")

# Process shot level label annotations
shot_labels = result.annotation_results[0].shot_label_annotations
for i, shot_label in enumerate(shot_labels):
    print("Shot label description: {}".format(shot_label.entity.description))
    for category_entity in shot_label.category_entities:
        print(
            "\tLabel category description: {}".format(category_entity.description)
        )

    for i, shot in enumerate(shot_label.segments):
        start_time = (
            shot.segment.start_time_offset.seconds
            + shot.segment.start_time_offset.nanos / 1e9
        )
        end_time = (
            shot.segment.end_time_offset.seconds
            + shot.segment.end_time_offset.nanos / 1e9
        )
        positions = "{}s to {}s".format(start_time, end_time)
        confidence = shot.confidence
        print("\tSegment {}: {}".format(i, positions))
        print("\tConfidence: {}".format(confidence))
    print("\n")

# Process frame level label annotations
frame_labels = result.annotation_results[0].frame_label_annotations
for i, frame_label in enumerate(frame_labels):
    print("Frame label description: {}".format(frame_label.entity.description))
    for category_entity in frame_label.category_entities:
        print(
            "\tLabel category description: {}".format(category_entity.description)
        )

    # Each frame_label_annotation has many frames,
    # here we print information only about the first frame.
    frame = frame_label.frames[0]
    time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
    print("\tFirst frame time offset: {}s".format(time_offset))
    print("\tFirst frame confidence: {}".format(frame.confidence))
    print("\n")

PHP

use Google\Cloud\VideoIntelligence\V1\VideoIntelligenceServiceClient;
use Google\Cloud\VideoIntelligence\V1\Feature;

/** Uncomment and populate these variables in your code */
// $uri = 'The cloud storage object to analyze (gs://your-bucket-name/your-object-name)';
// $options = [];

# Instantiate a client.
$video = new VideoIntelligenceServiceClient();

# Execute a request.
$operation = $video->annotateVideo([
    'inputUri' => $uri,
    'features' => [Feature::LABEL_DETECTION]
]);

# Wait for the request to complete.
$operation->pollUntilComplete($options);

# Print the results.
if ($operation->operationSucceeded()) {
    $results = $operation->getResult()->getAnnotationResults()[0];

    # Process video/segment level label annotations
    foreach ($results->getSegmentLabelAnnotations() as $label) {
        printf('Video label description: %s' . PHP_EOL, $label->getEntity()->getDescription());
        foreach ($label->getCategoryEntities() as $categoryEntity) {
            printf('  Category: %s' . PHP_EOL, $categoryEntity->getDescription());
        }
        foreach ($label->getSegments() as $segment) {
            $start = $segment->getSegment()->getStartTimeOffset();
            $end = $segment->getSegment()->getEndTimeOffset();
            printf('  Segment: %ss to %ss' . PHP_EOL,
                $start->getSeconds() + $start->getNanos()/1000000000.0,
                $end->getSeconds() + $end->getNanos()/1000000000.0);
            printf('  Confidence: %f' . PHP_EOL, $segment->getConfidence());
        }
    }
    print(PHP_EOL);

    # Process shot level label annotations
    foreach ($results->getShotLabelAnnotations() as $label) {
        printf('Shot label description: %s' . PHP_EOL, $label->getEntity()->getDescription());
        foreach ($label->getCategoryEntities() as $categoryEntity) {
            printf('  Category: %s' . PHP_EOL, $categoryEntity->getDescription());
        }
        foreach ($label->getSegments() as $shot) {
            $start = $shot->getSegment()->getStartTimeOffset();
            $end = $shot->getSegment()->getEndTimeOffset();
            printf('  Shot: %ss to %ss' . PHP_EOL,
                $start->getSeconds() + $start->getNanos()/1000000000.0,
                $end->getSeconds() + $end->getNanos()/1000000000.0);
            printf('  Confidence: %f' . PHP_EOL, $shot->getConfidence());
        }
    }
    print(PHP_EOL);
} else {
    print_r($operation->getError());
}

Ruby

# path = "Path to a video file on Google Cloud Storage: gs://bucket/video.mp4"

require "google/cloud/video_intelligence"

video = Google::Cloud::VideoIntelligence.video_intelligence_service

# Start a long-running operation to annotate the video
operation = video.annotate_video features: [:LABEL_DETECTION], input_uri: path
puts "Processing video for label annotations:"
operation.wait_until_done!

raise operation.error.message if operation.error?
puts "Finished Processing."

labels = operation.results.annotation_results.first.segment_label_annotations

# Print each label with its segments and confidences
# (inlined here so the sample is self-contained).
labels.each do |label|
  puts "Label description: #{label.entity.description}"
  label.segments.each do |segment|
    start_time = segment.segment.start_time_offset.seconds +
                 segment.segment.start_time_offset.nanos / 1e9
    end_time = segment.segment.end_time_offset.seconds +
               segment.segment.end_time_offset.nanos / 1e9
    puts "  Segment: #{start_time}s to #{end_time}s"
    puts "  Confidence: #{segment.confidence}"
  end
end