Analyzing Videos for Shot Changes

Shot change analysis detects shot changes in a video.

This section shows how to analyze a video for shot changes. The example analyzes a video file located in Google Cloud Storage.

For more detail, see the Python tutorial.

Protocol

Refer to the videos:annotate API endpoint for complete details.

To perform shot detection, make a POST request and provide the appropriate request body:

POST https://videointelligence.googleapis.com/v1/videos:annotate?key=YOUR_API_KEY
{
  "inputUri": "gs://demomaker/gbikes_dinosaur.mp4",
  "features": ["SHOT_CHANGE_DETECTION"]
}

If the Video Intelligence annotation request is successful, the response contains a name field, similar to the following:

{
  "name": "us-west1.16680573"
}

This name represents a long-running operation, which you can query using the v1.operations API.

To retrieve the video annotation response, send a GET request to the v1.operations endpoint, passing the value of name in the URL. Once the operation has completed, the annotation results are returned.
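
For example, the polling request looks roughly like the following (a sketch that mirrors the API key authentication used above; OPERATION_NAME stands for the name value returned by the annotate call):

GET https://videointelligence.googleapis.com/v1/operations/OPERATION_NAME?key=YOUR_API_KEY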

Shot detection annotations are returned as a shotAnnotations list:

[
  {
    "inputUri": "/demomaker/gbikes_dinosaur.mp4",
    "shotAnnotations": [
      {
        "startTimeOffset": "0s",
        "endTimeOffset": "5.167104s"
      },
      {
        "startTimeOffset": "5.236736s",
        "endTimeOffset": "10.072064s"
      },
      {
        "startTimeOffset": "10.106880s",
        "endTimeOffset": "28.139520s"
      },
      {
        "startTimeOffset": "28.174336s",
        "endTimeOffset": "42.768384s"
      }
    ]
  }
]

C#

public static object AnalyzeShotsGcs(string uri)
{
    var client = VideoIntelligenceServiceClient.Create();
    var request = new AnnotateVideoRequest()
    {
        InputUri = uri,
        Features = { Feature.ShotChangeDetection }
    };
    var op = client.AnnotateVideo(request).PollUntilCompleted();
    foreach (var result in op.Result.AnnotationResults)
    {
        foreach (var annotation in result.ShotAnnotations)
        {
            Console.Out.WriteLine("Start Time Offset: {0}\tEnd Time Offset: {1}",
                annotation.StartTimeOffset, annotation.EndTimeOffset);
        }
    }
    return 0;
}

Go

func shotChangeURI(w io.Writer, file string) error {
	ctx := context.Background()
	client, err := video.NewClient(ctx)
	if err != nil {
		return err
	}

	op, err := client.AnnotateVideo(ctx, &videopb.AnnotateVideoRequest{
		Features: []videopb.Feature{
			videopb.Feature_SHOT_CHANGE_DETECTION,
		},
		InputUri: file,
	})
	if err != nil {
		return err
	}
	resp, err := op.Wait(ctx)
	if err != nil {
		return err
	}

	// A single video was processed. Get the first result.
	result := resp.AnnotationResults[0].ShotAnnotations

	for _, shot := range result {
		start, _ := ptypes.Duration(shot.StartTimeOffset)
		end, _ := ptypes.Duration(shot.EndTimeOffset)

		fmt.Fprintf(w, "Shot: %s to %s\n", start, end)
	}

	return nil
}

Java

// Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
  // Provide path to file hosted on GCS as "gs://bucket-name/..."
  AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
      .setInputUri(gcsUri)
      .addFeatures(Feature.SHOT_CHANGE_DETECTION)
      .build();

  // Create an operation that will contain the response when the operation completes.
  OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
      client.annotateVideoAsync(request);

  System.out.println("Waiting for operation to complete...");
  // Print detected shot changes and their location ranges in the analyzed video.
  for (VideoAnnotationResults result : response.get().getAnnotationResultsList()) {
    if (result.getShotAnnotationsCount() > 0) {
      System.out.println("Shots: ");
      for (VideoSegment segment : result.getShotAnnotationsList()) {
        double startTime = segment.getStartTimeOffset().getSeconds()
            + segment.getStartTimeOffset().getNanos() / 1e9;
        double endTime = segment.getEndTimeOffset().getSeconds()
            + segment.getEndTimeOffset().getNanos() / 1e9;
        System.out.printf("Location: %.3f:%.3f\n", startTime, endTime);
      }
    } else {
      System.out.println("No shot changes detected in " + gcsUri);
    }
  }
}

Node.js

// Imports the Google Cloud Video Intelligence library
const video = require('@google-cloud/video-intelligence').v1;

// Creates a client
const client = new video.VideoIntelligenceServiceClient();

/**
 * TODO(developer): Uncomment the following line before running the sample.
 */
// const gcsUri = 'GCS URI of file to analyze, e.g. gs://my-bucket/my-video.mp4';

const request = {
  inputUri: gcsUri,
  features: ['SHOT_CHANGE_DETECTION'],
};

// Detects camera shot changes
const [operation] = await client.annotateVideo(request);
console.log('Waiting for operation to complete...');
const [operationResult] = await operation.promise();
// Gets shot changes
const shotChanges = operationResult.annotationResults[0].shotAnnotations;
console.log('Shot changes:');

if (shotChanges.length === 1) {
  console.log(`The entire video is one shot.`);
} else {
  shotChanges.forEach((shot, shotIdx) => {
    console.log(`Scene ${shotIdx} occurs from:`);
    if (shot.startTimeOffset === undefined) {
      shot.startTimeOffset = {};
    }
    if (shot.endTimeOffset === undefined) {
      shot.endTimeOffset = {};
    }
    if (shot.startTimeOffset.seconds === undefined) {
      shot.startTimeOffset.seconds = 0;
    }
    if (shot.startTimeOffset.nanos === undefined) {
      shot.startTimeOffset.nanos = 0;
    }
    if (shot.endTimeOffset.seconds === undefined) {
      shot.endTimeOffset.seconds = 0;
    }
    if (shot.endTimeOffset.nanos === undefined) {
      shot.endTimeOffset.nanos = 0;
    }
    console.log(
      `\tStart: ${shot.startTimeOffset.seconds}` +
        `.${(shot.startTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
    console.log(
      `\tEnd: ${shot.endTimeOffset.seconds}.` +
        `${(shot.endTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
  });
}

Python

For information about installing and using the Cloud Video Intelligence API client library for Python, see the Cloud Video Intelligence API client libraries page.
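
If the library is not yet installed, it is typically available from PyPI (shown here as an assumed setup command, not an excerpt from the installation guide):

pip install google-cloud-videointelligence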
""" Detects camera shot changes. """
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.SHOT_CHANGE_DETECTION]
operation = video_client.annotate_video(path, features=features)
print('\nProcessing video for shot change annotations:')

result = operation.result(timeout=90)
print('\nFinished processing.')

# first result is retrieved because a single video was processed
for i, shot in enumerate(result.annotation_results[0].shot_annotations):
    start_time = (shot.start_time_offset.seconds +
                  shot.start_time_offset.nanos / 1e9)
    end_time = (shot.end_time_offset.seconds +
                shot.end_time_offset.nanos / 1e9)
    print('\tShot {}: {} to {}'.format(i, start_time, end_time))

PHP

use Google\Cloud\VideoIntelligence\V1\VideoIntelligenceServiceClient;
use Google\Cloud\VideoIntelligence\V1\Feature;

/** Uncomment and populate these variables in your code */
// $uri = 'The cloud storage object to analyze (gs://your-bucket-name/your-object-name)';
// $options = [];

# Instantiate a client.
$video = new VideoIntelligenceServiceClient();

# Execute a request.
$operation = $video->annotateVideo([
    'inputUri' => $uri,
    'features' => [Feature::SHOT_CHANGE_DETECTION]
]);

# Wait for the request to complete.
$operation->pollUntilComplete($options);

# Print the result.
if ($operation->operationSucceeded()) {
    $results = $operation->getResult()->getAnnotationResults()[0];
    foreach ($results->getShotAnnotations() as $shot) {
        $start = $shot->getStartTimeOffset();
        $end = $shot->getEndTimeOffset();
        printf('Shot: %ss to %ss' . PHP_EOL,
            $start->getSeconds() + $start->getNanos()/1000000000.0,
            $end->getSeconds() + $end->getNanos()/1000000000.0);
    }
} else {
    print_r($operation->getError());
}

Ruby

# path = "Path to a video file on Google Cloud Storage: gs://bucket/video.mp4"

require "google/cloud/video_intelligence"

video = Google::Cloud::VideoIntelligence.new

# Register a callback during the method call
operation = video.annotate_video input_uri: path, features: [:SHOT_CHANGE_DETECTION] do |operation|
  raise operation.error.message if operation.error?
  puts "Finished processing."

  # first result is retrieved because a single video was processed
  annotation_result = operation.results.annotation_results.first
  puts "Scenes:"

  annotation_result.shot_annotations.each do |shot_annotation|
    start_time = (shot_annotation.start_time_offset.seconds +
                   shot_annotation.start_time_offset.nanos / 1e9)
    end_time =   (shot_annotation.end_time_offset.seconds +
                   shot_annotation.end_time_offset.nanos / 1e9)

    puts "#{start_time} to #{end_time}"
  end
end

puts "Processing video for shot change annotations:"
operation.wait_until_done!
