funclabel(wio.Writer,filestring)error{ctx:=context.Background()client,err:=video.NewClient(ctx)iferr!=nil{returnfmt.Errorf("video.NewClient: %w",err)}deferclient.Close()fileBytes,err:=os.ReadFile(file)iferr!=nil{returnerr}op,err:=client.AnnotateVideo(ctx,&videopb.AnnotateVideoRequest{Features:[]videopb.Feature{videopb.Feature_LABEL_DETECTION,},InputContent:fileBytes,})iferr!=nil{returnfmt.Errorf("AnnotateVideo: %w",err)}resp,err:=op.Wait(ctx)iferr!=nil{returnfmt.Errorf("Wait: %w",err)}printLabels:=func(labels[]*videopb.LabelAnnotation){for_,label:=rangelabels{fmt.Fprintf(w,"\tDescription: %s\n",label.Entity.Description)for_,category:=rangelabel.CategoryEntities{fmt.Fprintf(w,"\t\tCategory: %s\n",category.Description)}for_,segment:=rangelabel.Segments{start,_:=ptypes.Duration(segment.Segment.StartTimeOffset)end,_:=ptypes.Duration(segment.Segment.EndTimeOffset)fmt.Fprintf(w,"\t\tSegment: %s to %s\n",start,end)}}}// A single video was processed. Get the first result.result:=resp.AnnotationResults[0]fmt.Fprintln(w,"SegmentLabelAnnotations:")printLabels(result.SegmentLabelAnnotations)fmt.Fprintln(w,"ShotLabelAnnotations:")printLabels(result.ShotLabelAnnotations)fmt.Fprintln(w,"FrameLabelAnnotations:")printLabels(result.FrameLabelAnnotations)returnnil}
Java
// Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClienttry(VideoIntelligenceServiceClientclient=VideoIntelligenceServiceClient.create()){// Read file and encode into Base64Pathpath=Paths.get(filePath);byte[]data=Files.readAllBytes(path);AnnotateVideoRequestrequest=AnnotateVideoRequest.newBuilder().setInputContent(ByteString.copyFrom(data)).addFeatures(Feature.LABEL_DETECTION).build();// Create an operation that will contain the response when the operation completes.OperationFuture<AnnotateVideoResponse,AnnotateVideoProgress>response=client.annotateVideoAsync(request);System.out.println("Waiting for operation to complete...");for(VideoAnnotationResultsresults:response.get().getAnnotationResultsList()){// process video / segment level label annotationsSystem.out.println("Locations: ");for(LabelAnnotationlabelAnnotation:results.getSegmentLabelAnnotationsList()){System.out.println("Video label: "+labelAnnotation.getEntity().getDescription());// categoriesfor(EntitycategoryEntity:labelAnnotation.getCategoryEntitiesList()){System.out.println("Video label category: "+categoryEntity.getDescription());}// segmentsfor(LabelSegmentsegment:labelAnnotation.getSegmentsList()){doublestartTime=segment.getSegment().getStartTimeOffset().getSeconds()+segment.getSegment().getStartTimeOffset().getNanos()/1e9;doubleendTime=segment.getSegment().getEndTimeOffset().getSeconds()+segment.getSegment().getEndTimeOffset().getNanos()/1e9;System.out.printf("Segment location: %.3f:%.2f\n",startTime,endTime);System.out.println("Confidence: "+segment.getConfidence());}}// process shot label annotationsfor(LabelAnnotationlabelAnnotation:results.getShotLabelAnnotationsList()){System.out.println("Shot label: "+labelAnnotation.getEntity().getDescription());// categoriesfor(EntitycategoryEntity:labelAnnotation.getCategoryEntitiesList()){System.out.println("Shot label category: "+categoryEntity.getDescription());}// 
segmentsfor(LabelSegmentsegment:labelAnnotation.getSegmentsList()){doublestartTime=segment.getSegment().getStartTimeOffset().getSeconds()+segment.getSegment().getStartTimeOffset().getNanos()/1e9;doubleendTime=segment.getSegment().getEndTimeOffset().getSeconds()+segment.getSegment().getEndTimeOffset().getNanos()/1e9;System.out.printf("Segment location: %.3f:%.2f\n",startTime,endTime);System.out.println("Confidence: "+segment.getConfidence());}}// process frame label annotationsfor(LabelAnnotationlabelAnnotation:results.getFrameLabelAnnotationsList()){System.out.println("Frame label: "+labelAnnotation.getEntity().getDescription());// categoriesfor(EntitycategoryEntity:labelAnnotation.getCategoryEntitiesList()){System.out.println("Frame label category: "+categoryEntity.getDescription());}// segmentsfor(LabelSegmentsegment:labelAnnotation.getSegmentsList()){doublestartTime=segment.getSegment().getStartTimeOffset().getSeconds()+segment.getSegment().getStartTimeOffset().getNanos()/1e9;doubleendTime=segment.getSegment().getEndTimeOffset().getSeconds()+segment.getSegment().getEndTimeOffset().getNanos()/1e9;System.out.printf("Segment location: %.3f:%.2f\n",startTime,endTime);System.out.println("Confidence: "+segment.getConfidence());}}}}
Node.js
// Imports the Google Cloud Video Intelligence library + Node's fs library
const video = require('@google-cloud/video-intelligence').v1;
const fs = require('fs');
const util = require('util');

// Creates a client
const client = new video.VideoIntelligenceServiceClient();

/**
 * TODO(developer): Uncomment the following line before running the sample.
 */
// const path = 'Local file to analyze, e.g. ./my-file.mp4';

// Reads a local video file and converts it to base64
const readFile = util.promisify(fs.readFile);
const file = await readFile(path);
const inputContent = file.toString('base64');

// Constructs request
const request = {
  inputContent: inputContent,
  features: ['LABEL_DETECTION'],
};

// Detects labels in a video
const [operation] = await client.annotateVideo(request);
console.log('Waiting for operation to complete...');
const [operationResult] = await operation.promise();

// Gets annotations for video
const annotations = operationResult.annotationResults[0];

const labels = annotations.segmentLabelAnnotations;
labels.forEach(label => {
  console.log(`Label ${label.entity.description} occurs at:`);
  label.segments.forEach(segment => {
    const time = segment.segment;
    // Proto3 omits zero-valued fields, so default missing offsets to 0
    // before formatting them.
    if (time.startTimeOffset.seconds === undefined) {
      time.startTimeOffset.seconds = 0;
    }
    if (time.startTimeOffset.nanos === undefined) {
      time.startTimeOffset.nanos = 0;
    }
    if (time.endTimeOffset.seconds === undefined) {
      time.endTimeOffset.seconds = 0;
    }
    if (time.endTimeOffset.nanos === undefined) {
      time.endTimeOffset.nanos = 0;
    }
    console.log(
      `\tStart: ${time.startTimeOffset.seconds}` +
        `.${(time.startTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
    console.log(
      `\tEnd: ${time.endTimeOffset.seconds}.` +
        `${(time.endTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
    console.log(`\tConfidence: ${segment.confidence}`);
  });
});
"""Detect labels given a file path."""video_client=videointelligence.VideoIntelligenceServiceClient()features=[videointelligence.Feature.LABEL_DETECTION]withio.open(path,"rb")asmovie:input_content=movie.read()operation=video_client.annotate_video(request={"features":features,"input_content":input_content})print("\nProcessing video for label annotations:")result=operation.result(timeout=90)print("\nFinished processing.")# Process video/segment level label annotationssegment_labels=result.annotation_results[0].segment_label_annotationsfori,segment_labelinenumerate(segment_labels):print("Video label description: {}".format(segment_label.entity.description))forcategory_entityinsegment_label.category_entities:print("\tLabel category description: {}".format(category_entity.description))fori,segmentinenumerate(segment_label.segments):start_time=(segment.segment.start_time_offset.seconds+segment.segment.start_time_offset.microseconds/1e6)end_time=(segment.segment.end_time_offset.seconds+segment.segment.end_time_offset.microseconds/1e6)positions="{}s to {}s".format(start_time,end_time)confidence=segment.confidenceprint("\tSegment {}: {}".format(i,positions))print("\tConfidence: {}".format(confidence))print("\n")# Process shot level label annotationsshot_labels=result.annotation_results[0].shot_label_annotationsfori,shot_labelinenumerate(shot_labels):print("Shot label description: {}".format(shot_label.entity.description))forcategory_entityinshot_label.category_entities:print("\tLabel category description: {}".format(category_entity.description))fori,shotinenumerate(shot_label.segments):start_time=(shot.segment.start_time_offset.seconds+shot.segment.start_time_offset.microseconds/1e6)end_time=(shot.segment.end_time_offset.seconds+shot.segment.end_time_offset.microseconds/1e6)positions="{}s to {}s".format(start_time,end_time)confidence=shot.confidenceprint("\tSegment {}: {}".format(i,positions))print("\tConfidence: {}".format(confidence))print("\n")# Process frame level label 
annotationsframe_labels=result.annotation_results[0].frame_label_annotationsfori,frame_labelinenumerate(frame_labels):print("Frame label description: {}".format(frame_label.entity.description))forcategory_entityinframe_label.category_entities:print("\tLabel category description: {}".format(category_entity.description))# Each frame_label_annotation has many frames,# here we print information only about the first frame.frame=frame_label.frames[0]time_offset=frame.time_offset.seconds+frame.time_offset.microseconds/1e6print("\tFirst frame time offset: {}s".format(time_offset))print("\tFirst frame confidence: {}".format(frame.confidence))print("\n")
注: 出力 GCS URI がユーザーによって指定された場合、アノテーションはその GCS URI に格納されます。
Go
funclabelURI(wio.Writer,filestring)error{ctx:=context.Background()client,err:=video.NewClient(ctx)iferr!=nil{returnfmt.Errorf("video.NewClient: %w",err)}deferclient.Close()op,err:=client.AnnotateVideo(ctx,&videopb.AnnotateVideoRequest{Features:[]videopb.Feature{videopb.Feature_LABEL_DETECTION,},InputUri:file,})iferr!=nil{returnfmt.Errorf("AnnotateVideo: %w",err)}resp,err:=op.Wait(ctx)iferr!=nil{returnfmt.Errorf("Wait: %w",err)}printLabels:=func(labels[]*videopb.LabelAnnotation){for_,label:=rangelabels{fmt.Fprintf(w,"\tDescription: %s\n",label.Entity.Description)for_,category:=rangelabel.CategoryEntities{fmt.Fprintf(w,"\t\tCategory: %s\n",category.Description)}for_,segment:=rangelabel.Segments{start,_:=ptypes.Duration(segment.Segment.StartTimeOffset)end,_:=ptypes.Duration(segment.Segment.EndTimeOffset)fmt.Fprintf(w,"\t\tSegment: %s to %s\n",start,end)}}}// A single video was processed. Get the first result.result:=resp.AnnotationResults[0]fmt.Fprintln(w,"SegmentLabelAnnotations:")printLabels(result.SegmentLabelAnnotations)fmt.Fprintln(w,"ShotLabelAnnotations:")printLabels(result.ShotLabelAnnotations)fmt.Fprintln(w,"FrameLabelAnnotations:")printLabels(result.FrameLabelAnnotations)returnnil}
Java
// Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClienttry(VideoIntelligenceServiceClientclient=VideoIntelligenceServiceClient.create()){// Provide path to file hosted on GCS as "gs://bucket-name/..."AnnotateVideoRequestrequest=AnnotateVideoRequest.newBuilder().setInputUri(gcsUri).addFeatures(Feature.LABEL_DETECTION).build();// Create an operation that will contain the response when the operation completes.OperationFuture<AnnotateVideoResponse,AnnotateVideoProgress>response=client.annotateVideoAsync(request);System.out.println("Waiting for operation to complete...");for(VideoAnnotationResultsresults:response.get().getAnnotationResultsList()){// process video / segment level label annotationsSystem.out.println("Locations: ");for(LabelAnnotationlabelAnnotation:results.getSegmentLabelAnnotationsList()){System.out.println("Video label: "+labelAnnotation.getEntity().getDescription());// categoriesfor(EntitycategoryEntity:labelAnnotation.getCategoryEntitiesList()){System.out.println("Video label category: "+categoryEntity.getDescription());}// segmentsfor(LabelSegmentsegment:labelAnnotation.getSegmentsList()){doublestartTime=segment.getSegment().getStartTimeOffset().getSeconds()+segment.getSegment().getStartTimeOffset().getNanos()/1e9;doubleendTime=segment.getSegment().getEndTimeOffset().getSeconds()+segment.getSegment().getEndTimeOffset().getNanos()/1e9;System.out.printf("Segment location: %.3f:%.3f\n",startTime,endTime);System.out.println("Confidence: "+segment.getConfidence());}}// process shot label annotationsfor(LabelAnnotationlabelAnnotation:results.getShotLabelAnnotationsList()){System.out.println("Shot label: "+labelAnnotation.getEntity().getDescription());// categoriesfor(EntitycategoryEntity:labelAnnotation.getCategoryEntitiesList()){System.out.println("Shot label category: "+categoryEntity.getDescription());}// 
segmentsfor(LabelSegmentsegment:labelAnnotation.getSegmentsList()){doublestartTime=segment.getSegment().getStartTimeOffset().getSeconds()+segment.getSegment().getStartTimeOffset().getNanos()/1e9;doubleendTime=segment.getSegment().getEndTimeOffset().getSeconds()+segment.getSegment().getEndTimeOffset().getNanos()/1e9;System.out.printf("Segment location: %.3f:%.3f\n",startTime,endTime);System.out.println("Confidence: "+segment.getConfidence());}}// process frame label annotationsfor(LabelAnnotationlabelAnnotation:results.getFrameLabelAnnotationsList()){System.out.println("Frame label: "+labelAnnotation.getEntity().getDescription());// categoriesfor(EntitycategoryEntity:labelAnnotation.getCategoryEntitiesList()){System.out.println("Frame label category: "+categoryEntity.getDescription());}// segmentsfor(LabelSegmentsegment:labelAnnotation.getSegmentsList()){doublestartTime=segment.getSegment().getStartTimeOffset().getSeconds()+segment.getSegment().getStartTimeOffset().getNanos()/1e9;doubleendTime=segment.getSegment().getEndTimeOffset().getSeconds()+segment.getSegment().getEndTimeOffset().getNanos()/1e9;System.out.printf("Segment location: %.3f:%.2f\n",startTime,endTime);System.out.println("Confidence: "+segment.getConfidence());}}}}
Node.js
// Imports the Google Cloud Video Intelligence library
const video = require('@google-cloud/video-intelligence').v1;

// Creates a client
const client = new video.VideoIntelligenceServiceClient();

/**
 * TODO(developer): Uncomment the following line before running the sample.
 */
// const gcsUri = 'GCS URI of the video to analyze, e.g. gs://my-bucket/my-video.mp4';

const request = {
  inputUri: gcsUri,
  features: ['LABEL_DETECTION'],
};

// Detects labels in a video
const [operation] = await client.annotateVideo(request);
console.log('Waiting for operation to complete...');
const [operationResult] = await operation.promise();

// Gets annotations for video
const annotations = operationResult.annotationResults[0];

const labels = annotations.segmentLabelAnnotations;
labels.forEach(label => {
  console.log(`Label ${label.entity.description} occurs at:`);
  label.segments.forEach(segment => {
    const time = segment.segment;
    // Proto3 omits zero-valued fields, so default missing offsets to 0
    // before formatting them.
    if (time.startTimeOffset.seconds === undefined) {
      time.startTimeOffset.seconds = 0;
    }
    if (time.startTimeOffset.nanos === undefined) {
      time.startTimeOffset.nanos = 0;
    }
    if (time.endTimeOffset.seconds === undefined) {
      time.endTimeOffset.seconds = 0;
    }
    if (time.endTimeOffset.nanos === undefined) {
      time.endTimeOffset.nanos = 0;
    }
    console.log(
      `\tStart: ${time.startTimeOffset.seconds}` +
        `.${(time.startTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
    console.log(
      `\tEnd: ${time.endTimeOffset.seconds}.` +
        `${(time.endTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
    console.log(`\tConfidence: ${segment.confidence}`);
  });
});
Python
"""Detects labels given a GCS path."""video_client=videointelligence.VideoIntelligenceServiceClient()features=[videointelligence.Feature.LABEL_DETECTION]mode=videointelligence.LabelDetectionMode.SHOT_AND_FRAME_MODEconfig=videointelligence.LabelDetectionConfig(label_detection_mode=mode)context=videointelligence.VideoContext(label_detection_config=config)operation=video_client.annotate_video(request={"features":features,"input_uri":path,"video_context":context})print("\nProcessing video for label annotations:")result=operation.result(timeout=180)print("\nFinished processing.")# Process video/segment level label annotationssegment_labels=result.annotation_results[0].segment_label_annotationsfori,segment_labelinenumerate(segment_labels):print("Video label description: {}".format(segment_label.entity.description))forcategory_entityinsegment_label.category_entities:print("\tLabel category description: {}".format(category_entity.description))fori,segmentinenumerate(segment_label.segments):start_time=(segment.segment.start_time_offset.seconds+segment.segment.start_time_offset.microseconds/1e6)end_time=(segment.segment.end_time_offset.seconds+segment.segment.end_time_offset.microseconds/1e6)positions="{}s to {}s".format(start_time,end_time)confidence=segment.confidenceprint("\tSegment {}: {}".format(i,positions))print("\tConfidence: {}".format(confidence))print("\n")# Process shot level label annotationsshot_labels=result.annotation_results[0].shot_label_annotationsfori,shot_labelinenumerate(shot_labels):print("Shot label description: {}".format(shot_label.entity.description))forcategory_entityinshot_label.category_entities:print("\tLabel category description: {}".format(category_entity.description))fori,shotinenumerate(shot_label.segments):start_time=(shot.segment.start_time_offset.seconds+shot.segment.start_time_offset.microseconds/1e6)end_time=(shot.segment.end_time_offset.seconds+shot.segment.end_time_offset.microseconds/1e6)positions="{}s to 
{}s".format(start_time,end_time)confidence=shot.confidenceprint("\tSegment {}: {}".format(i,positions))print("\tConfidence: {}".format(confidence))print("\n")# Process frame level label annotationsframe_labels=result.annotation_results[0].frame_label_annotationsfori,frame_labelinenumerate(frame_labels):print("Frame label description: {}".format(frame_label.entity.description))forcategory_entityinframe_label.category_entities:print("\tLabel category description: {}".format(category_entity.description))# Each frame_label_annotation has many frames,# here we print information only about the first frame.frame=frame_label.frames[0]time_offset=frame.time_offset.seconds+frame.time_offset.microseconds/1e6print("\tFirst frame time offset: {}s".format(time_offset))print("\tFirst frame confidence: {}".format(frame.confidence))print("\n")
[[["わかりやすい","easyToUnderstand","thumb-up"],["問題の解決に役立った","solvedMyProblem","thumb-up"],["その他","otherUp","thumb-up"]],[["Hard to understand","hardToUnderstand","thumb-down"],["Incorrect information or sample code","incorrectInformationOrSampleCode","thumb-down"],["Missing the information/samples I need","missingTheInformationSamplesINeed","thumb-down"],["翻訳に関する問題","translationIssue","thumb-down"],["その他","otherDown","thumb-down"]],["最終更新日 2024-11-19 UTC。"],[],[]]