The following samples detect people in a video stored in Cloud Storage.

Java

To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see Set up authentication for a local development environment.
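Before running any of the samples below, you can optionally confirm that Application Default Credentials are discoverable. The following is a minimal Python sketch, not part of the official samples; it assumes the google-auth package is installed and relies on google.auth.default(), which raises DefaultCredentialsError when no credentials are found.

import google.auth
from google.auth.exceptions import DefaultCredentialsError

try:
    # Loads Application Default Credentials from the environment,
    # e.g. after running `gcloud auth application-default login`.
    credentials, project_id = google.auth.default()
    print("ADC found; default project: {}".format(project_id))
except DefaultCredentialsError:
    print("No Application Default Credentials found; set them up first.")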
import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1.DetectedAttribute;
import com.google.cloud.videointelligence.v1.DetectedLandmark;
import com.google.cloud.videointelligence.v1.Feature;
import com.google.cloud.videointelligence.v1.PersonDetectionAnnotation;
import com.google.cloud.videointelligence.v1.PersonDetectionConfig;
import com.google.cloud.videointelligence.v1.TimestampedObject;
import com.google.cloud.videointelligence.v1.Track;
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1.VideoContext;
import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1.VideoSegment;

public class DetectPersonGcs {

  public static void detectPersonGcs() throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String gcsUri = "gs://cloud-samples-data/video/googlework_short.mp4";
    detectPersonGcs(gcsUri);
  }

  // Detects people in a video stored in Google Cloud Storage using
  // the Cloud Video Intelligence API.
  public static void detectPersonGcs(String gcsUri) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
        VideoIntelligenceServiceClient.create()) {
      PersonDetectionConfig personDetectionConfig =
          PersonDetectionConfig.newBuilder()
              // Must set includeBoundingBoxes to true to get poses and attributes.
              .setIncludeBoundingBoxes(true)
              .setIncludePoseLandmarks(true)
              .setIncludeAttributes(true)
              .build();
      VideoContext videoContext =
          VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();

      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputUri(gcsUri)
              .addFeatures(Feature.PERSON_DETECTION)
              .setVideoContext(videoContext)
              .build();

      // Detects people in a video.
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
          videoIntelligenceServiceClient.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");
      AnnotateVideoResponse response = future.get();

      // Gets the first response, since only one video was sent.
      VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);

      // Annotations for the people detected, tracked, and recognized in the video.
      for (PersonDetectionAnnotation personDetectionAnnotation :
          annotationResult.getPersonDetectionAnnotationsList()) {
        System.out.print("Person detected:\n");
        for (Track track : personDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
          System.out.printf(
              "\tStart: %d.%.0fs\n",
              segment.getStartTimeOffset().getSeconds(),
              segment.getStartTimeOffset().getNanos() / 1e6);
          System.out.printf(
              "\tEnd: %d.%.0fs\n",
              segment.getEndTimeOffset().getSeconds(),
              segment.getEndTimeOffset().getNanos() / 1e6);

          // Each segment includes timestamped objects that include characteristics
          // (e.g. clothes, posture) of the person detected.
          TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);

          // Attributes include unique pieces of clothing and poses (i.e. body landmarks)
          // of the person detected.
          for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
            System.out.printf(
                "\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
          }

          // Landmarks in person detection include body parts.
          for (DetectedLandmark landmark : firstTimestampedObject.getLandmarksList()) {
            System.out.printf(
                "\tLandmark: %s; Vertex: %f, %f\n",
                landmark.getName(), landmark.getPoint().getX(), landmark.getPoint().getY());
          }
        }
      }
    }
  }
}
Node.js
To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see Set up authentication for a local development environment.
/**
 * TODO(developer): Uncomment this variable before running the sample.
 */
// const gcsUri = 'GCS URI of the video to analyze, e.g. gs://my-bucket/my-video.mp4';

// Imports the Google Cloud Video Intelligence library
const Video = require('@google-cloud/video-intelligence').v1;

// Creates a client
const video = new Video.VideoIntelligenceServiceClient();

async function detectPersonGCS() {
  const request = {
    inputUri: gcsUri,
    features: ['PERSON_DETECTION'],
    videoContext: {
      personDetectionConfig: {
        // Must set includeBoundingBoxes to true to get poses and attributes.
        includeBoundingBoxes: true,
        includePoseLandmarks: true,
        includeAttributes: true,
      },
    },
  };

  // Detects people in a video.
  // We get the first result because we only process one video.
  const [operation] = await video.annotateVideo(request);
  console.log('Waiting for operation to complete...');
  const results = await operation.promise();

  // Gets annotations for the video.
  const personAnnotations =
    results[0].annotationResults[0].personDetectionAnnotations;

  for (const {tracks} of personAnnotations) {
    console.log('Person detected:');
    for (const {segment, timestampedObjects} of tracks) {
      console.log(
        `\tStart: ${segment.startTimeOffset.seconds}` +
          `.${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s`
      );
      console.log(
        `\tEnd: ${segment.endTimeOffset.seconds}.` +
          `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s`
      );

      // Each segment includes timestamped objects that
      // include characteristics (e.g. clothes, posture)
      // of the person detected.
      const [firstTimestampedObject] = timestampedObjects;

      // Attributes include unique pieces of clothing and poses
      // (i.e. body landmarks) of the person detected.
      for (const {name, value} of firstTimestampedObject.attributes) {
        console.log(`\tAttribute: ${name}; Value: ${value}`);
      }

      // Landmarks in person detection include body parts.
      for (const {name, point} of firstTimestampedObject.landmarks) {
        console.log(`\tLandmark: ${name}; Vertex: ${point.x}, ${point.y}`);
      }
    }
  }
}
detectPersonGCS();
Python
To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see Set up authentication for a local development environment.
from google.cloud import videointelligence_v1 as videointelligence


def detect_person(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
    """Detects people in a video."""

    client = videointelligence.VideoIntelligenceServiceClient()

    # Configure the request
    config = videointelligence.types.PersonDetectionConfig(
        include_bounding_boxes=True,
        include_attributes=True,
        include_pose_landmarks=True,
    )
    context = videointelligence.types.VideoContext(person_detection_config=config)

    # Start the asynchronous request
    operation = client.annotate_video(
        request={
            "features": [videointelligence.Feature.PERSON_DETECTION],
            "input_uri": gcs_uri,
            "video_context": context,
        }
    )

    print("\nProcessing video for person detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    for annotation in annotation_result.person_detection_annotations:
        print("Person detected:")
        for track in annotation.tracks:
            print(
                "Segment: {}s to {}s".format(
                    track.segment.start_time_offset.seconds
                    + track.segment.start_time_offset.microseconds / 1e6,
                    track.segment.end_time_offset.seconds
                    + track.segment.end_time_offset.microseconds / 1e6,
                )
            )

            # Each segment includes timestamped objects that include
            # characteristics (e.g. clothes, posture) of the person detected.
            # Grab the first timestamped object.
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft  : {}".format(box.left))
            print("\ttop   : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include unique pieces of clothing,
            # poses, or hair color.
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}: {} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )

            # Landmarks in person detection include body parts such as
            # left_shoulder, right_ear, and right_ankle.
            print("Landmarks:")
            for landmark in timestamped_object.landmarks:
                print(
                    "\t{}: {} (x={}, y={})".format(
                        landmark.name,
                        landmark.confidence,
                        landmark.point.x,  # Normalized vertex
                        landmark.point.y,  # Normalized vertex
                    )
                )
The following samples detect people in a local video file.

Java

To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see Set up authentication for a local development environment.
import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1.DetectedAttribute;
import com.google.cloud.videointelligence.v1.DetectedLandmark;
import com.google.cloud.videointelligence.v1.Feature;
import com.google.cloud.videointelligence.v1.PersonDetectionAnnotation;
import com.google.cloud.videointelligence.v1.PersonDetectionConfig;
import com.google.cloud.videointelligence.v1.TimestampedObject;
import com.google.cloud.videointelligence.v1.Track;
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1.VideoContext;
import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1.VideoSegment;
import com.google.protobuf.ByteString;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class DetectPerson {

  public static void detectPerson() throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String localFilePath = "resources/googlework_short.mp4";
    detectPerson(localFilePath);
  }

  // Detects people in a video stored in a local file using the Cloud Video Intelligence API.
  public static void detectPerson(String localFilePath) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
        VideoIntelligenceServiceClient.create()) {
      // Reads a local video file into the request payload.
      Path path = Paths.get(localFilePath);
      byte[] data = Files.readAllBytes(path);
      ByteString inputContent = ByteString.copyFrom(data);

      PersonDetectionConfig personDetectionConfig =
          PersonDetectionConfig.newBuilder()
              // Must set includeBoundingBoxes to true to get poses and attributes.
              .setIncludeBoundingBoxes(true)
              .setIncludePoseLandmarks(true)
              .setIncludeAttributes(true)
              .build();
      VideoContext videoContext =
          VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();

      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputContent(inputContent)
              .addFeatures(Feature.PERSON_DETECTION)
              .setVideoContext(videoContext)
              .build();

      // Detects people in a video.
      // We get the first result because only one video is processed.
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
          videoIntelligenceServiceClient.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");
      AnnotateVideoResponse response = future.get();

      // Gets annotations for the video.
      VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);

      // Annotations for the people detected, tracked, and recognized in the video.
      for (PersonDetectionAnnotation personDetectionAnnotation :
          annotationResult.getPersonDetectionAnnotationsList()) {
        System.out.print("Person detected:\n");
        for (Track track : personDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
          System.out.printf(
              "\tStart: %d.%.0fs\n",
              segment.getStartTimeOffset().getSeconds(),
              segment.getStartTimeOffset().getNanos() / 1e6);
          System.out.printf(
              "\tEnd: %d.%.0fs\n",
              segment.getEndTimeOffset().getSeconds(),
              segment.getEndTimeOffset().getNanos() / 1e6);

          // Each segment includes timestamped objects that include characteristics
          // (e.g. clothes, posture) of the person detected.
          TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);

          // Attributes include unique pieces of clothing and poses (i.e. body landmarks)
          // of the person detected.
          for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
            System.out.printf(
                "\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
          }

          // Landmarks in person detection include body parts.
          for (DetectedLandmark landmark : firstTimestampedObject.getLandmarksList()) {
            System.out.printf(
                "\tLandmark: %s; Vertex: %f, %f\n",
                landmark.getName(), landmark.getPoint().getX(), landmark.getPoint().getY());
          }
        }
      }
    }
  }
}
Node.js
To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see Set up authentication for a local development environment.
/**
 * TODO(developer): Uncomment this variable before running the sample.
 */
// const path = 'Local file to analyze, e.g. ./my-file.mp4';

// Imports the Google Cloud Video Intelligence library + Node's fs library
const Video = require('@google-cloud/video-intelligence').v1;
const fs = require('fs');

// Creates a client
const video = new Video.VideoIntelligenceServiceClient();

// Reads a local video file and converts it to base64
const file = fs.readFileSync(path);
const inputContent = file.toString('base64');

async function detectPerson() {
  const request = {
    inputContent: inputContent,
    features: ['PERSON_DETECTION'],
    videoContext: {
      personDetectionConfig: {
        // Must set includeBoundingBoxes to true to get poses and attributes.
        includeBoundingBoxes: true,
        includePoseLandmarks: true,
        includeAttributes: true,
      },
    },
  };

  // Detects people in a video.
  // We get the first result because we only process one video.
  const [operation] = await video.annotateVideo(request);
  console.log('Waiting for operation to complete...');
  const results = await operation.promise();

  // Gets annotations for the video.
  const personAnnotations =
    results[0].annotationResults[0].personDetectionAnnotations;

  for (const {tracks} of personAnnotations) {
    console.log('Person detected:');
    for (const {segment, timestampedObjects} of tracks) {
      console.log(
        `\tStart: ${segment.startTimeOffset.seconds}` +
          `.${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s`
      );
      console.log(
        `\tEnd: ${segment.endTimeOffset.seconds}.` +
          `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s`
      );

      // Each segment includes timestamped objects that
      // include characteristics (e.g. clothes, posture)
      // of the person detected.
      const [firstTimestampedObject] = timestampedObjects;

      // Attributes include unique pieces of clothing and poses
      // (i.e. body landmarks) of the person detected.
      for (const {name, value} of firstTimestampedObject.attributes) {
        console.log(`\tAttribute: ${name}; Value: ${value}`);
      }

      // Landmarks in person detection include body parts.
      for (const {name, point} of firstTimestampedObject.landmarks) {
        console.log(`\tLandmark: ${name}; Vertex: ${point.x}, ${point.y}`);
      }
    }
  }
}
detectPerson();
Python
To authenticate to Video Intelligence, set up Application Default Credentials.
For more information, see Set up authentication for a local development environment.
import io

from google.cloud import videointelligence_v1 as videointelligence


def detect_person(local_file_path="path/to/your/video-file.mp4"):
    """Detects people in a video from a local file."""

    client = videointelligence.VideoIntelligenceServiceClient()

    with io.open(local_file_path, "rb") as f:
        input_content = f.read()

    # Configure the request
    config = videointelligence.types.PersonDetectionConfig(
        include_bounding_boxes=True,
        include_attributes=True,
        include_pose_landmarks=True,
    )
    context = videointelligence.types.VideoContext(person_detection_config=config)

    # Start the asynchronous request
    operation = client.annotate_video(
        request={
            "features": [videointelligence.Feature.PERSON_DETECTION],
            "input_content": input_content,
            "video_context": context,
        }
    )

    print("\nProcessing video for person detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    for annotation in annotation_result.person_detection_annotations:
        print("Person detected:")
        for track in annotation.tracks:
            print(
                "Segment: {}s to {}s".format(
                    track.segment.start_time_offset.seconds
                    + track.segment.start_time_offset.microseconds / 1e6,
                    track.segment.end_time_offset.seconds
                    + track.segment.end_time_offset.microseconds / 1e6,
                )
            )

            # Each segment includes timestamped objects that include
            # characteristics (e.g. clothes, posture) of the person detected.
            # Grab the first timestamped object.
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft  : {}".format(box.left))
            print("\ttop   : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include unique pieces of clothing,
            # poses, or hair color.
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}: {} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )

            # Landmarks in person detection include body parts such as
            # left_shoulder, right_ear, and right_ankle.
            print("Landmarks:")
            for landmark in timestamped_object.landmarks:
                print(
                    "\t{}: {} (x={}, y={})".format(
                        landmark.name,
                        landmark.confidence,
                        landmark.point.x,  # Normalized vertex
                        landmark.point.y,  # Normalized vertex
                    )
                )
[[["易于理解","easyToUnderstand","thumb-up"],["解决了我的问题","solvedMyProblem","thumb-up"],["其他","otherUp","thumb-up"]],[["Hard to understand","hardToUnderstand","thumb-down"],["Incorrect information or sample code","incorrectInformationOrSampleCode","thumb-down"],["Missing the information/samples I need","missingTheInformationSamplesINeed","thumb-down"],["翻译问题","translationIssue","thumb-down"],["其他","otherDown","thumb-down"]],["最后更新时间 (UTC):2024-09-15。"],[],[]]