Video classification

Video classification identifies objects, locations, activities, animal species, products, and much more.

Using AutoML video

Before you begin

To learn how to create an AutoML model, see the Vertex AI beginner's guide. For instructions on how to create your AutoML model, see Video data in "Develop and use ML models" in the Vertex AI documentation.

Use your AutoML model

The following code sample demonstrates how to use your AutoML model for video classification with the streaming client library.

Java

To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.


import com.google.api.gax.rpc.BidiStream;
import com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation;
import com.google.cloud.videointelligence.v1p3beta1.LabelFrame;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig;
import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
import com.google.protobuf.ByteString;
import io.grpc.StatusRuntimeException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.concurrent.TimeoutException;

class StreamingAutoMlClassification {

  // Perform streaming video classification with an AutoML Model
  static void streamingAutoMlClassification(String filePath, String projectId, String modelId)
      throws TimeoutException, StatusRuntimeException, IOException {
    // String filePath = "path_to_your_video_file";
    // String projectId = "YOUR_GCP_PROJECT_ID";
    // String modelId = "YOUR_AUTO_ML_CLASSIFICATION_MODEL_ID";

    try (StreamingVideoIntelligenceServiceClient client =
        StreamingVideoIntelligenceServiceClient.create()) {

      Path path = Paths.get(filePath);
      byte[] data = Files.readAllBytes(path);
      // Set the chunk size to 5MB (recommended less than 10MB).
      int chunkSize = 5 * 1024 * 1024;
      int numChunks = (int) Math.ceil((double) data.length / chunkSize);

      String modelPath =
          String.format("projects/%s/locations/us-central1/models/%s", projectId, modelId);

      System.out.println(modelPath);

      StreamingAutomlClassificationConfig streamingAutomlClassificationConfig =
          StreamingAutomlClassificationConfig.newBuilder().setModelName(modelPath).build();

      StreamingVideoConfig streamingVideoConfig =
          StreamingVideoConfig.newBuilder()
              .setFeature(StreamingFeature.STREAMING_AUTOML_CLASSIFICATION)
              .setAutomlClassificationConfig(streamingAutomlClassificationConfig)
              .build();

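      // Open a bidirectional streaming RPC. Requests are pushed with send();
      // responses are read by iterating over the returned BidiStream.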
      BidiStream<StreamingAnnotateVideoRequest, StreamingAnnotateVideoResponse> call =
          client.streamingAnnotateVideoCallable().call();

      // The first request must **only** contain the video configuration:
      call.send(
          StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());

      // Subsequent requests must **only** contain the video data.
      // Send the requests in chunks.
      for (int i = 0; i < numChunks; i++) {
        call.send(
            StreamingAnnotateVideoRequest.newBuilder()
                .setInputContent(
                    ByteString.copyFrom(
                        // Clamp the final chunk so that no zero padding past the
                        // end of the file is sent as video content.
                        Arrays.copyOfRange(
                            data, i * chunkSize, Math.min(data.length, (i + 1) * chunkSize))))
                .build());
      }

      // Tell the service you are done sending data
      call.closeSend();

      for (StreamingAnnotateVideoResponse response : call) {
        if (response.hasError()) {
          System.out.println(response.getError().getMessage());
          break;
        }

        StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();

        for (LabelAnnotation annotation : annotationResults.getLabelAnnotationsList()) {
          String entity = annotation.getEntity().getDescription();

          // There is only one frame per annotation
          LabelFrame labelFrame = annotation.getFrames(0);
          double offset =
              labelFrame.getTimeOffset().getSeconds() + labelFrame.getTimeOffset().getNanos() / 1e9;
          float confidence = labelFrame.getConfidence();

          System.out.format("At %fs segment: %s (%f)\n", offset, entity, confidence);
        }
      }
      System.out.println("Video streamed successfully.");
    }
  }
}
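
For reference, here is a minimal sketch of how the sample method above could be invoked. The driver class name and the file path, project ID, and model ID values are hypothetical placeholders, not part of the official sample.

// Hypothetical driver; it must live in the same package as
// StreamingAutoMlClassification, since the method is package-private.
public class StreamingAutoMlClassificationDemo {
  public static void main(String[] args) throws Exception {
    StreamingAutoMlClassification.streamingAutoMlClassification(
        "path/to/your/video.mp4", // placeholder: local video file
        "your-project-id", // placeholder: Google Cloud project ID
        "your-model-id"); // placeholder: AutoML classification model ID
  }
}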

Node.js

To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

/**
 * TODO(developer): Uncomment these variables before running the sample.
 */
// const path = 'Local file to analyze, e.g. ./my-file.mp4';
// const modelId = 'Your AutoML model ID';
// const projectId = 'Your GCP project ID';

const {StreamingVideoIntelligenceServiceClient} =
  require('@google-cloud/video-intelligence').v1p3beta1;
const fs = require('fs');

// Instantiates a client
const client = new StreamingVideoIntelligenceServiceClient();

// Streaming configuration
const modelPath = `projects/${projectId}/locations/us-central1/models/${modelId}`;
const configRequest = {
  videoConfig: {
    feature: 'STREAMING_AUTOML_CLASSIFICATION',
    automlClassificationConfig: {
      modelName: modelPath,
    },
  },
};

const readStream = fs.createReadStream(path, {
  highWaterMark: 5 * 1024 * 1024, // Chunk size set to 5MB (recommended less than 10MB).
  encoding: 'base64',
});
// Load file content.
// Note: Input videos must have supported video codecs. See
// https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
// for more details.
const chunks = [];
readStream
  .on('data', chunk => {
    const request = {
      inputContent: chunk.toString(),
    };
    chunks.push(request);
  })
  .on('close', () => {
    // configRequest should be the first in the stream of requests
    stream.write(configRequest);
    for (let i = 0; i < chunks.length; i++) {
      stream.write(chunks[i]);
    }
    stream.end();
  });

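// Note: `stream` is assigned synchronously below, so it is already
// initialized by the time the asynchronous 'close' handler above runs.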
const stream = client
  .streamingAnnotateVideo()
  .on('data', response => {
    // Gets annotations for the video.
    const annotations = response.annotationResults;
    const labels = annotations.labelAnnotations;
    labels.forEach(label => {
      console.log(
        `Label ${label.entity.description} occurs at: ${
          label.frames[0].timeOffset.seconds || 0
        }` + `.${(label.frames[0].timeOffset.nanos / 1e6).toFixed(0)}s`
      );
      console.log(` Confidence: ${label.frames[0].confidence}`);
    });
  })
  .on('error', err => {
    console.error(err);
  });

Python

To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

import io

from google.cloud import videointelligence_v1p3beta1 as videointelligence

# path = 'path_to_file'
# project_id = 'gcp_project_id'
# model_id = 'automl_classification_model_id'

client = videointelligence.StreamingVideoIntelligenceServiceClient()

model_path = "projects/{}/locations/us-central1/models/{}".format(
    project_id, model_id
)

# Here we use classification as an example.
automl_config = videointelligence.StreamingAutomlClassificationConfig(
    model_name=model_path
)

video_config = videointelligence.StreamingVideoConfig(
    feature=videointelligence.StreamingFeature.STREAMING_AUTOML_CLASSIFICATION,
    automl_classification_config=automl_config,
)

# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
    video_config=video_config
)

# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024

# Load file content.
# Note: Input videos must have supported video codecs. See
# https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
# for more details.
stream = []
with io.open(path, "rb") as video_file:
    while True:
        data = video_file.read(chunk_size)
        if not data:
            break
        stream.append(data)

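# Build the request stream: the config request must be yielded first,
# followed by the raw video chunks.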
def stream_generator():
    yield config_request
    for chunk in stream:
        yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)

requests = stream_generator()

# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos, set the timeout to a value larger than
# the length (in seconds) of the stream.
responses = client.streaming_annotate_video(requests, timeout=600)

for response in responses:
    # Check for errors.
    if response.error.message:
        print(response.error.message)
        break

    for label in response.annotation_results.label_annotations:
        for frame in label.frames:
            print(
                "At {:3d}s segment, {:5.1%} {}".format(
                    frame.time_offset.seconds,
                    frame.confidence,
                    label.entity.entity_id,
                )
            )