The following samples detect faces in a video stored in Cloud Storage.

To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see Set up authentication for a local development environment.
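The client libraries in all three samples below pick up these credentials automatically. As a quick sanity check, here is a minimal sketch (in Python, using the google-auth package that the client libraries depend on) to confirm that credentials resolve before running any of the samples:

# Minimal check that Application Default Credentials resolve.
# Assumes `gcloud auth application-default login` (or an equivalent
# service-account setup) has already been run.
import google.auth

credentials, project_id = google.auth.default()
print("Authenticated; default project:", project_id)

Java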
import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1.DetectedAttribute;
import com.google.cloud.videointelligence.v1.FaceDetectionAnnotation;
import com.google.cloud.videointelligence.v1.FaceDetectionConfig;
import com.google.cloud.videointelligence.v1.Feature;
import com.google.cloud.videointelligence.v1.TimestampedObject;
import com.google.cloud.videointelligence.v1.Track;
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1.VideoContext;
import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1.VideoSegment;

public class DetectFacesGcs {

  public static void detectFacesGcs() throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String gcsUri = "gs://cloud-samples-data/video/googlework_short.mp4";
    detectFacesGcs(gcsUri);
  }

  // Detects faces in a video stored in Google Cloud Storage using the Cloud Video Intelligence API.
  public static void detectFacesGcs(String gcsUri) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
        VideoIntelligenceServiceClient.create()) {
      FaceDetectionConfig faceDetectionConfig =
          FaceDetectionConfig.newBuilder()
              // Must set includeBoundingBoxes to true to get facial attributes.
              .setIncludeBoundingBoxes(true)
              .setIncludeAttributes(true)
              .build();
      VideoContext videoContext =
          VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();

      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputUri(gcsUri)
              .addFeatures(Feature.FACE_DETECTION)
              .setVideoContext(videoContext)
              .build();

      // Detects faces in a video
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
          videoIntelligenceServiceClient.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");
      AnnotateVideoResponse response = future.get();

      // Gets annotations for video
      VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);

      // Annotations for list of people detected, tracked and recognized in video.
      for (FaceDetectionAnnotation faceDetectionAnnotation :
          annotationResult.getFaceDetectionAnnotationsList()) {
        System.out.print("Face detected:\n");
        for (Track track : faceDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
          System.out.printf(
              "\tStart: %d.%.0fs\n",
              segment.getStartTimeOffset().getSeconds(),
              segment.getStartTimeOffset().getNanos() / 1e6);
          System.out.printf(
              "\tEnd: %d.%.0fs\n",
              segment.getEndTimeOffset().getSeconds(),
              segment.getEndTimeOffset().getNanos() / 1e6);

          // Each segment includes timestamped objects that
          // include characteristics of the face detected.
          TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);

          for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
            // Attributes include glasses, headwear, smiling, direction of gaze
            System.out.printf(
                "\tAttribute %s: %s %s\n",
                attribute.getName(), attribute.getValue(), attribute.getConfidence());
          }
        }
      }
    }
  }
}
Node.js
To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see Set up authentication for a local development environment.
/**
 * TODO(developer): Uncomment these variables before running the sample.
 */
// const gcsUri = 'GCS URI of the video to analyze, e.g. gs://my-bucket/my-video.mp4';

// Imports the Google Cloud Video Intelligence library
const Video = require('@google-cloud/video-intelligence').v1;

// Creates a client
const video = new Video.VideoIntelligenceServiceClient();

async function detectFacesGCS() {
  const request = {
    inputUri: gcsUri,
    features: ['FACE_DETECTION'],
    videoContext: {
      faceDetectionConfig: {
        // Must set includeBoundingBoxes to true to get facial attributes.
        includeBoundingBoxes: true,
        includeAttributes: true,
      },
    },
  };

  // Detects faces in a video
  // We get the first result because we only process 1 video
  const [operation] = await video.annotateVideo(request);
  console.log('Waiting for operation to complete...');
  const results = await operation.promise();

  // Gets annotations for video
  const faceAnnotations =
    results[0].annotationResults[0].faceDetectionAnnotations;

  for (const {tracks} of faceAnnotations) {
    console.log('Face detected:');

    for (const {segment, timestampedObjects} of tracks) {
      console.log(
        `\tStart: ${segment.startTimeOffset.seconds}.` +
          `${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s`
      );
      console.log(
        `\tEnd: ${segment.endTimeOffset.seconds}.` +
          `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s`
      );

      // Each segment includes timestamped objects that
      // include characteristics of the face detected.
      const [firstTimestampedObject] = timestampedObjects;

      for (const {name} of firstTimestampedObject.attributes) {
        // Attributes include 'glasses', 'headwear', 'smiling'.
        console.log(`\tAttribute: ${name}; `);
      }
    }
  }
}

detectFacesGCS();
Python
To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see Set up authentication for a local development environment.
from google.cloud import videointelligence_v1 as videointelligence


def detect_faces(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
    """Detects faces in a video."""

    client = videointelligence.VideoIntelligenceServiceClient()

    # Configure the request
    config = videointelligence.FaceDetectionConfig(
        include_bounding_boxes=True, include_attributes=True
    )
    context = videointelligence.VideoContext(face_detection_config=config)

    # Start the asynchronous request
    operation = client.annotate_video(
        request={
            "features": [videointelligence.Feature.FACE_DETECTION],
            "input_uri": gcs_uri,
            "video_context": context,
        }
    )

    print("\nProcessing video for face detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    for annotation in annotation_result.face_detection_annotations:
        print("Face detected:")
        for track in annotation.tracks:
            print(
                "Segment: {}s to {}s".format(
                    track.segment.start_time_offset.seconds
                    + track.segment.start_time_offset.microseconds / 1e6,
                    track.segment.end_time_offset.seconds
                    + track.segment.end_time_offset.microseconds / 1e6,
                )
            )

            # Each segment includes timestamped faces that include
            # characteristics of the face detected.
            # Grab the first timestamped face
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft  : {}".format(box.left))
            print("\ttop   : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include glasses, headwear, smiling, direction of gaze
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}: {} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )
The following samples detect faces in a video stored in a local file.

Java

To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see Set up authentication for a local development environment.
import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1.DetectedAttribute;
import com.google.cloud.videointelligence.v1.FaceDetectionAnnotation;
import com.google.cloud.videointelligence.v1.FaceDetectionConfig;
import com.google.cloud.videointelligence.v1.Feature;
import com.google.cloud.videointelligence.v1.TimestampedObject;
import com.google.cloud.videointelligence.v1.Track;
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1.VideoContext;
import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1.VideoSegment;
import com.google.protobuf.ByteString;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class DetectFaces {

  public static void detectFaces() throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String localFilePath = "resources/googlework_short.mp4";
    detectFaces(localFilePath);
  }

  // Detects faces in a video stored in a local file using the Cloud Video Intelligence API.
  public static void detectFaces(String localFilePath) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
        VideoIntelligenceServiceClient.create()) {
      // Reads the local video file into a ByteString to send inline with the request.
      Path path = Paths.get(localFilePath);
      byte[] data = Files.readAllBytes(path);
      ByteString inputContent = ByteString.copyFrom(data);

      FaceDetectionConfig faceDetectionConfig =
          FaceDetectionConfig.newBuilder()
              // Must set includeBoundingBoxes to true to get facial attributes.
              .setIncludeBoundingBoxes(true)
              .setIncludeAttributes(true)
              .build();
      VideoContext videoContext =
          VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();

      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputContent(inputContent)
              .addFeatures(Feature.FACE_DETECTION)
              .setVideoContext(videoContext)
              .build();

      // Detects faces in a video
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
          videoIntelligenceServiceClient.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");
      AnnotateVideoResponse response = future.get();

      // Gets annotations for video
      VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);

      // Annotations for list of faces detected, tracked and recognized in video.
      for (FaceDetectionAnnotation faceDetectionAnnotation :
          annotationResult.getFaceDetectionAnnotationsList()) {
        System.out.print("Face detected:\n");
        for (Track track : faceDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
          System.out.printf(
              "\tStart: %d.%.0fs\n",
              segment.getStartTimeOffset().getSeconds(),
              segment.getStartTimeOffset().getNanos() / 1e6);
          System.out.printf(
              "\tEnd: %d.%.0fs\n",
              segment.getEndTimeOffset().getSeconds(),
              segment.getEndTimeOffset().getNanos() / 1e6);

          // Each segment includes timestamped objects that
          // include characteristics of the face detected.
          TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);

          for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
            // Attributes include glasses, headwear, smiling, direction of gaze
            System.out.printf(
                "\tAttribute %s: %s %s\n",
                attribute.getName(), attribute.getValue(), attribute.getConfidence());
          }
        }
      }
    }
  }
}
Node.js
To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see Set up authentication for a local development environment.
/**
 * TODO(developer): Uncomment these variables before running the sample.
 */
// const path = 'Local file to analyze, e.g. ./my-file.mp4';

// Imports the Google Cloud Video Intelligence library + Node's fs library
const Video = require('@google-cloud/video-intelligence').v1;
const fs = require('fs');

// Creates a client
const video = new Video.VideoIntelligenceServiceClient();

// Reads a local video file and converts it to base64
const file = fs.readFileSync(path);
const inputContent = file.toString('base64');

async function detectFaces() {
  const request = {
    inputContent: inputContent,
    features: ['FACE_DETECTION'],
    videoContext: {
      faceDetectionConfig: {
        // Must set includeBoundingBoxes to true to get facial attributes.
        includeBoundingBoxes: true,
        includeAttributes: true,
      },
    },
  };

  // Detects faces in a video
  // We get the first result because we only process 1 video
  const [operation] = await video.annotateVideo(request);
  console.log('Waiting for operation to complete...');
  const results = await operation.promise();

  // Gets annotations for video
  const faceAnnotations =
    results[0].annotationResults[0].faceDetectionAnnotations;

  for (const {tracks} of faceAnnotations) {
    console.log('Face detected:');

    for (const {segment, timestampedObjects} of tracks) {
      console.log(
        `\tStart: ${segment.startTimeOffset.seconds}.` +
          `${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s`
      );
      console.log(
        `\tEnd: ${segment.endTimeOffset.seconds}.` +
          `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s`
      );

      // Each segment includes timestamped objects that
      // include characteristics of the face detected.
      const [firstTimestampedObject] = timestampedObjects;

      for (const {name} of firstTimestampedObject.attributes) {
        // Attributes include 'glasses', 'headwear', 'smiling'.
        console.log(`\tAttribute: ${name}; `);
      }
    }
  }
}

detectFaces();
Python
To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see Set up authentication for a local development environment.
import io

from google.cloud import videointelligence_v1 as videointelligence


def detect_faces(local_file_path="path/to/your/video-file.mp4"):
    """Detects faces in a video from a local file."""

    client = videointelligence.VideoIntelligenceServiceClient()

    with io.open(local_file_path, "rb") as f:
        input_content = f.read()

    # Configure the request
    config = videointelligence.FaceDetectionConfig(
        include_bounding_boxes=True, include_attributes=True
    )
    context = videointelligence.VideoContext(face_detection_config=config)

    # Start the asynchronous request
    operation = client.annotate_video(
        request={
            "features": [videointelligence.Feature.FACE_DETECTION],
            "input_content": input_content,
            "video_context": context,
        }
    )

    print("\nProcessing video for face detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    for annotation in annotation_result.face_detection_annotations:
        print("Face detected:")
        for track in annotation.tracks:
            print(
                "Segment: {}s to {}s".format(
                    track.segment.start_time_offset.seconds
                    + track.segment.start_time_offset.microseconds / 1e6,
                    track.segment.end_time_offset.seconds
                    + track.segment.end_time_offset.microseconds / 1e6,
                )
            )

            # Each segment includes timestamped faces that include
            # characteristics of the face detected.
            # Grab the first timestamped face
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft  : {}".format(box.left))
            print("\ttop   : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include glasses, headwear, smiling, direction of gaze
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}: {} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )
[[["易于理解","easyToUnderstand","thumb-up"],["解决了我的问题","solvedMyProblem","thumb-up"],["其他","otherUp","thumb-up"]],[["Hard to understand","hardToUnderstand","thumb-down"],["Incorrect information or sample code","incorrectInformationOrSampleCode","thumb-down"],["Missing the information/samples I need","missingTheInformationSamplesINeed","thumb-down"],["翻译问题","translationIssue","thumb-down"],["其他","otherDown","thumb-down"]],["最后更新时间 (UTC):2024-09-15。"],[],[]]