Detect faces in a video stored in Cloud Storage

Java

To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
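On a development machine, a common way to set up Application Default Credentials is with the gcloud CLI; a minimal sketch (your environment may instead use a service account or workload identity federation):

gcloud auth application-default login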
import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1.DetectedAttribute;
import com.google.cloud.videointelligence.v1.FaceDetectionAnnotation;
import com.google.cloud.videointelligence.v1.FaceDetectionConfig;
import com.google.cloud.videointelligence.v1.Feature;
import com.google.cloud.videointelligence.v1.TimestampedObject;
import com.google.cloud.videointelligence.v1.Track;
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1.VideoContext;
import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1.VideoSegment;

public class DetectFacesGcs {

  public static void detectFacesGcs() throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String gcsUri = "gs://cloud-samples-data/video/googlework_short.mp4";
    detectFacesGcs(gcsUri);
  }

  // Detects faces in a video stored in Google Cloud Storage using the Cloud Video Intelligence API.
  public static void detectFacesGcs(String gcsUri) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
        VideoIntelligenceServiceClient.create()) {

      FaceDetectionConfig faceDetectionConfig =
          FaceDetectionConfig.newBuilder()
              // Must set includeBoundingBoxes to true to get facial attributes.
              .setIncludeBoundingBoxes(true)
              .setIncludeAttributes(true)
              .build();
      VideoContext videoContext =
          VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();

      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputUri(gcsUri)
              .addFeatures(Feature.FACE_DETECTION)
              .setVideoContext(videoContext)
              .build();

      // Detects faces in a video
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
          videoIntelligenceServiceClient.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");

      AnnotateVideoResponse response = future.get();

      // Gets annotations for video
      VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);

      // Annotations for list of people detected, tracked and recognized in video.
      for (FaceDetectionAnnotation faceDetectionAnnotation :
          annotationResult.getFaceDetectionAnnotationsList()) {
        System.out.print("Face detected:\n");
        for (Track track : faceDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
          System.out.printf(
              "\tStart: %d.%.0fs\n",
              segment.getStartTimeOffset().getSeconds(),
              segment.getStartTimeOffset().getNanos() / 1e6);
          System.out.printf(
              "\tEnd: %d.%.0fs\n",
              segment.getEndTimeOffset().getSeconds(),
              segment.getEndTimeOffset().getNanos() / 1e6);

          // Each segment includes timestamped objects that
          // include characteristics of the face detected.
          TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);

          for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
            // Attributes include glasses, headwear, smiling, direction of gaze
            System.out.printf(
                "\tAttribute %s: %s %s\n",
                attribute.getName(), attribute.getValue(), attribute.getConfidence());
          }
        }
      }
    }
  }
}
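Compiling the Java sample above requires the Video Intelligence client library on the classpath. A Gradle sketch (the version number is illustrative; check Maven Central for the current release):

// Version is illustrative; use the latest release from Maven Central.
implementation 'com.google.cloud:google-cloud-video-intelligence:2.3.0'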
Node.js
To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
 * TODO(developer): Uncomment these variables before running the sample.
 */
// const gcsUri = 'GCS URI of the video to analyze, e.g. gs://my-bucket/my-video.mp4';

// Imports the Google Cloud Video Intelligence library
const Video = require('@google-cloud/video-intelligence').v1;

// Creates a client
const video = new Video.VideoIntelligenceServiceClient();

async function detectFacesGCS() {
  const request = {
    inputUri: gcsUri,
    features: ['FACE_DETECTION'],
    videoContext: {
      faceDetectionConfig: {
        // Must set includeBoundingBoxes to true to get facial attributes.
        includeBoundingBoxes: true,
        includeAttributes: true,
      },
    },
  };

  // Detects faces in a video
  const [operation] = await video.annotateVideo(request);
  console.log('Waiting for operation to complete...');
  // We get the first result because we only process 1 video
  const results = await operation.promise();

  // Gets annotations for video
  const faceAnnotations =
    results[0].annotationResults[0].faceDetectionAnnotations;
  for (const {tracks} of faceAnnotations) {
    console.log('Face detected:');
    for (const {segment, timestampedObjects} of tracks) {
      console.log(
        `\tStart: ${segment.startTimeOffset.seconds}.` +
          `${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s`
      );
      console.log(
        `\tEnd: ${segment.endTimeOffset.seconds}.` +
          `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s`
      );

      // Each segment includes timestamped objects that
      // include characteristics of the face detected.
      const [firstTimestampedObject] = timestampedObjects;

      for (const {name} of firstTimestampedObject.attributes) {
        // Attributes include 'glasses', 'headwear', 'smiling'.
        console.log(`\tAttribute: ${name}; `);
      }
    }
  }
}

detectFacesGCS();
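Running the Node.js sample requires the client library referenced by the require call, for example:

npm install @google-cloud/video-intelligence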
Python
To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
from google.cloud import videointelligence_v1 as videointelligence


def detect_faces(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
    """Detects faces in a video."""

    client = videointelligence.VideoIntelligenceServiceClient()

    # Configure the request
    config = videointelligence.FaceDetectionConfig(
        include_bounding_boxes=True, include_attributes=True
    )
    context = videointelligence.VideoContext(face_detection_config=config)

    # Start the asynchronous request
    operation = client.annotate_video(
        request={
            "features": [videointelligence.Feature.FACE_DETECTION],
            "input_uri": gcs_uri,
            "video_context": context,
        }
    )

    print("\nProcessing video for face detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    for annotation in annotation_result.face_detection_annotations:
        print("Face detected:")
        for track in annotation.tracks:
            print(
                "Segment: {}s to {}s".format(
                    track.segment.start_time_offset.seconds
                    + track.segment.start_time_offset.microseconds / 1e6,
                    track.segment.end_time_offset.seconds
                    + track.segment.end_time_offset.microseconds / 1e6,
                )
            )

            # Each segment includes timestamped faces that include
            # characteristics of the face detected.
            # Grab the first timestamped face
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft : {}".format(box.left))
            print("\ttop : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include glasses, headwear, smiling, direction of gaze
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}: {} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )
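Running the Python sample requires the client library, which is published on PyPI as google-cloud-videointelligence:

pip install google-cloud-videointelligence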
Detect faces in a video from a local file

Java

To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1.DetectedAttribute;
import com.google.cloud.videointelligence.v1.FaceDetectionAnnotation;
import com.google.cloud.videointelligence.v1.FaceDetectionConfig;
import com.google.cloud.videointelligence.v1.Feature;
import com.google.cloud.videointelligence.v1.TimestampedObject;
import com.google.cloud.videointelligence.v1.Track;
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1.VideoContext;
import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1.VideoSegment;
import com.google.protobuf.ByteString;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class DetectFaces {

  public static void detectFaces() throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String localFilePath = "resources/googlework_short.mp4";
    detectFaces(localFilePath);
  }

  // Detects faces in a video stored in a local file using the Cloud Video Intelligence API.
  public static void detectFaces(String localFilePath) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
        VideoIntelligenceServiceClient.create()) {
      // Reads a local video file and converts it to base64.
      Path path = Paths.get(localFilePath);
      byte[] data = Files.readAllBytes(path);
      ByteString inputContent = ByteString.copyFrom(data);

      FaceDetectionConfig faceDetectionConfig =
          FaceDetectionConfig.newBuilder()
              // Must set includeBoundingBoxes to true to get facial attributes.
              .setIncludeBoundingBoxes(true)
              .setIncludeAttributes(true)
              .build();
      VideoContext videoContext =
          VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();

      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputContent(inputContent)
              .addFeatures(Feature.FACE_DETECTION)
              .setVideoContext(videoContext)
              .build();

      // Detects faces in a video
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
          videoIntelligenceServiceClient.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");

      AnnotateVideoResponse response = future.get();

      // Gets annotations for video
      VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);

      // Annotations for list of faces detected, tracked and recognized in video.
      for (FaceDetectionAnnotation faceDetectionAnnotation :
          annotationResult.getFaceDetectionAnnotationsList()) {
        System.out.print("Face detected:\n");
        for (Track track : faceDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
          System.out.printf(
              "\tStart: %d.%.0fs\n",
              segment.getStartTimeOffset().getSeconds(),
              segment.getStartTimeOffset().getNanos() / 1e6);
          System.out.printf(
              "\tEnd: %d.%.0fs\n",
              segment.getEndTimeOffset().getSeconds(),
              segment.getEndTimeOffset().getNanos() / 1e6);

          // Each segment includes timestamped objects that
          // include characteristics of the face detected.
          TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);

          for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
            // Attributes include glasses, headwear, smiling, direction of gaze
            System.out.printf(
                "\tAttribute %s: %s %s\n",
                attribute.getName(), attribute.getValue(), attribute.getConfidence());
          }
        }
      }
    }
  }
}
Node.js
To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
 * TODO(developer): Uncomment these variables before running the sample.
 */
// const path = 'Local file to analyze, e.g. ./my-file.mp4';

// Imports the Google Cloud Video Intelligence library + Node's fs library
const Video = require('@google-cloud/video-intelligence').v1;
const fs = require('fs');

// Creates a client
const video = new Video.VideoIntelligenceServiceClient();

// Reads a local video file and converts it to base64
const file = fs.readFileSync(path);
const inputContent = file.toString('base64');

async function detectFaces() {
  const request = {
    inputContent: inputContent,
    features: ['FACE_DETECTION'],
    videoContext: {
      faceDetectionConfig: {
        // Must set includeBoundingBoxes to true to get facial attributes.
        includeBoundingBoxes: true,
        includeAttributes: true,
      },
    },
  };

  // Detects faces in a video
  const [operation] = await video.annotateVideo(request);
  console.log('Waiting for operation to complete...');
  // We get the first result because we only process 1 video
  const results = await operation.promise();

  // Gets annotations for video
  const faceAnnotations =
    results[0].annotationResults[0].faceDetectionAnnotations;
  for (const {tracks} of faceAnnotations) {
    console.log('Face detected:');
    for (const {segment, timestampedObjects} of tracks) {
      console.log(
        `\tStart: ${segment.startTimeOffset.seconds}.` +
          `${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s`
      );
      console.log(
        `\tEnd: ${segment.endTimeOffset.seconds}.` +
          `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s`
      );

      // Each segment includes timestamped objects that
      // include characteristics of the face detected.
      const [firstTimestampedObject] = timestampedObjects;

      for (const {name} of firstTimestampedObject.attributes) {
        // Attributes include 'glasses', 'headwear', 'smiling'.
        console.log(`\tAttribute: ${name}; `);
      }
    }
  }
}

detectFaces();
Python
To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import io

from google.cloud import videointelligence_v1 as videointelligence


def detect_faces(local_file_path="path/to/your/video-file.mp4"):
    """Detects faces in a video from a local file."""

    client = videointelligence.VideoIntelligenceServiceClient()

    with io.open(local_file_path, "rb") as f:
        input_content = f.read()

    # Configure the request
    config = videointelligence.FaceDetectionConfig(
        include_bounding_boxes=True, include_attributes=True
    )
    context = videointelligence.VideoContext(face_detection_config=config)

    # Start the asynchronous request
    operation = client.annotate_video(
        request={
            "features": [videointelligence.Feature.FACE_DETECTION],
            "input_content": input_content,
            "video_context": context,
        }
    )

    print("\nProcessing video for face detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    for annotation in annotation_result.face_detection_annotations:
        print("Face detected:")
        for track in annotation.tracks:
            print(
                "Segment: {}s to {}s".format(
                    track.segment.start_time_offset.seconds
                    + track.segment.start_time_offset.microseconds / 1e6,
                    track.segment.end_time_offset.seconds
                    + track.segment.end_time_offset.microseconds / 1e6,
                )
            )

            # Each segment includes timestamped faces that include
            # characteristics of the face detected.
            # Grab the first timestamped face
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft : {}".format(box.left))
            print("\ttop : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include glasses, headwear, smiling, direction of gaze
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}: {} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )
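A minimal invocation sketch for the function above; the path is illustrative and must point to a real video file on disk:

# Hypothetical path; replace with a real local video file.
detect_faces(local_file_path="resources/googlework_short.mp4")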
[[["わかりやすい","easyToUnderstand","thumb-up"],["問題の解決に役立った","solvedMyProblem","thumb-up"],["その他","otherUp","thumb-up"]],[["Hard to understand","hardToUnderstand","thumb-down"],["Incorrect information or sample code","incorrectInformationOrSampleCode","thumb-down"],["Missing the information/samples I need","missingTheInformationSamplesINeed","thumb-down"],["翻訳に関する問題","translationIssue","thumb-down"],["その他","otherDown","thumb-down"]],["最終更新日 2024-12-21 UTC。"],[],[]]