Detect labels in a video stored in Cloud Storage
Code sample
Go
// Sample video_quickstart uses the Google Cloud Video Intelligence API to label a video.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"

	video "cloud.google.com/go/videointelligence/apiv1"
	videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1"
)

func main() {
	ctx := context.Background()

	// Creates a client.
	client, err := video.NewClient(ctx)
	if err != nil {
		log.Fatalf("Failed to create client: %v", err)
	}
	defer client.Close()

	op, err := client.AnnotateVideo(ctx, &videopb.AnnotateVideoRequest{
		InputUri: "gs://cloud-samples-data/video/cat.mp4",
		Features: []videopb.Feature{
			videopb.Feature_LABEL_DETECTION,
		},
	})
	if err != nil {
		log.Fatalf("Failed to start annotation job: %v", err)
	}
	resp, err := op.Wait(ctx)
	if err != nil {
		log.Fatalf("Failed to annotate: %v", err)
	}

	// Only one video was processed, so get the first result.
	result := resp.GetAnnotationResults()[0]

	for _, annotation := range result.SegmentLabelAnnotations {
		fmt.Printf("Description: %s\n", annotation.Entity.Description)

		for _, category := range annotation.CategoryEntities {
			fmt.Printf("\tCategory: %s\n", category.Description)
		}

		for _, segment := range annotation.Segments {
			start, _ := ptypes.Duration(segment.Segment.StartTimeOffset)
			end, _ := ptypes.Duration(segment.Segment.EndTimeOffset)
			fmt.Printf("\tSegment: %s to %s\n", start, end)
			fmt.Printf("\tConfidence: %v\n", segment.Confidence)
		}
	}
}
Java
import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1.Entity;
import com.google.cloud.videointelligence.v1.Feature;
import com.google.cloud.videointelligence.v1.LabelAnnotation;
import com.google.cloud.videointelligence.v1.LabelSegment;
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
import java.util.List;

public class QuickstartSample {

  /** Demonstrates using the video intelligence client to detect labels in a video file. */
  public static void main(String[] args) throws Exception {
    // Instantiate a video intelligence client
    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
      // The Google Cloud Storage path to the video to annotate.
      String gcsUri = "gs://cloud-samples-data/video/cat.mp4";

      // Create an operation that will contain the response when the operation completes.
      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputUri(gcsUri)
              .addFeatures(Feature.LABEL_DETECTION)
              .build();

      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
          client.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");

      List<VideoAnnotationResults> results = response.get().getAnnotationResultsList();
      if (results.isEmpty()) {
        System.out.println("No labels detected in " + gcsUri);
        return;
      }
      for (VideoAnnotationResults result : results) {
        System.out.println("Labels:");
        // get video segment label annotations
        for (LabelAnnotation annotation : result.getSegmentLabelAnnotationsList()) {
          System.out.println(
              "Video label description : " + annotation.getEntity().getDescription());
          // categories
          for (Entity categoryEntity : annotation.getCategoryEntitiesList()) {
            System.out.println("Label Category description : " + categoryEntity.getDescription());
          }
          // segments
          for (LabelSegment segment : annotation.getSegmentsList()) {
            double startTime =
                segment.getSegment().getStartTimeOffset().getSeconds()
                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
            double endTime =
                segment.getSegment().getEndTimeOffset().getSeconds()
                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
            System.out.printf("Segment location : %.3f:%.3f\n", startTime, endTime);
            System.out.println("Confidence : " + segment.getConfidence());
          }
        }
      }
    }
  }
}
Node.js
// Imports the Google Cloud Video Intelligence library
const videoIntelligence = require('@google-cloud/video-intelligence');

// Creates a client
const client = new videoIntelligence.VideoIntelligenceServiceClient();

// The GCS uri of the video to analyze
const gcsUri = 'gs://cloud-samples-data/video/cat.mp4';

// The request is wrapped in an async function so that `await` can be used.
async function analyzeVideoLabels() {
  // Construct request
  const request = {
    inputUri: gcsUri,
    features: ['LABEL_DETECTION'],
  };

  // Execute request
  const [operation] = await client.annotateVideo(request);

  console.log(
    'Waiting for operation to complete... (this may take a few minutes)'
  );
  const [operationResult] = await operation.promise();

  // Gets annotations for video
  const annotations = operationResult.annotationResults[0];

  // Gets labels for video from its annotations
  const labels = annotations.segmentLabelAnnotations;
  labels.forEach(label => {
    console.log(`Label ${label.entity.description} occurs at:`);
    label.segments.forEach(segment => {
      segment = segment.segment;
      console.log(
        `\tStart: ${segment.startTimeOffset.seconds}` +
          `.${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s`
      );
      console.log(
        `\tEnd: ${segment.endTimeOffset.seconds}.` +
          `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s`
      );
    });
  });
}

analyzeVideoLabels();
PHP
use Google\Cloud\VideoIntelligence\V1\VideoIntelligenceServiceClient;
use Google\Cloud\VideoIntelligence\V1\Feature;

# Instantiate a client.
$video = new VideoIntelligenceServiceClient();

# Execute a request.
$features = [Feature::LABEL_DETECTION];
$options = [
    'inputUri' => 'gs://cloud-samples-data/video/cat.mp4',
    'features' => $features
];
$operation = $video->annotateVideo($options);

# Wait for the request to complete.
$operation->pollUntilComplete();

# Print the result.
if ($operation->operationSucceeded()) {
    $results = $operation->getResult()->getAnnotationResults()[0];
    # Process video/segment level label annotations
    foreach ($results->getSegmentLabelAnnotations() as $label) {
        printf('Video label description: %s' . PHP_EOL, $label->getEntity()->getDescription());
        foreach ($label->getCategoryEntities() as $categoryEntity) {
            printf(' Category: %s' . PHP_EOL, $categoryEntity->getDescription());
        }
        foreach ($label->getSegments() as $segment) {
            $start = $segment->getSegment()->getStartTimeOffset();
            $end = $segment->getSegment()->getEndTimeOffset();
            printf(' Segment: %ss to %ss' . PHP_EOL,
                $start->getSeconds() + $start->getNanos() / 1000000000.0,
                $end->getSeconds() + $end->getNanos() / 1000000000.0
            );
            printf(' Confidence: %f' . PHP_EOL, $segment->getConfidence());
        }
    }
} else {
    print_r($operation->getError());
}
Python
from google.cloud import videointelligence

video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.Feature.LABEL_DETECTION]
operation = video_client.annotate_video(
    request={
        "features": features,
        "input_uri": "gs://cloud-samples-data/video/cat.mp4",
    }
)
print("\nProcessing video for label annotations:")

result = operation.result(timeout=180)
print("\nFinished processing.")

# first result is retrieved because a single video was processed
segment_labels = result.annotation_results[0].segment_label_annotations
for i, segment_label in enumerate(segment_labels):
    print("Video label description: {}".format(segment_label.entity.description))
    for category_entity in segment_label.category_entities:
        print(
            "\tLabel category description: {}".format(category_entity.description)
        )

    for i, segment in enumerate(segment_label.segments):
        start_time = (
            segment.segment.start_time_offset.seconds
            + segment.segment.start_time_offset.microseconds / 1e6
        )
        end_time = (
            segment.segment.end_time_offset.seconds
            + segment.segment.end_time_offset.microseconds / 1e6
        )
        positions = "{}s to {}s".format(start_time, end_time)
        confidence = segment.confidence
        print("\tSegment {}: {}".format(i, positions))
        print("\tConfidence: {}".format(confidence))
    print("\n")
What's next
You can find code samples for other Google Cloud products in the Google Cloud sample browser.