AutoML 모델 생성에 대한 배경 정보는 Vertex AI 초보자 가이드를 참조하세요. AutoML 모델을 만드는 방법에 대한 자세한 내용은 Vertex AI 문서의 'ML 모델 개발 및 사용'에서 동영상 데이터를 참조하세요.
AutoML 모델 사용
다음 코드 샘플은 스트리밍 클라이언트 라이브러리를 사용하는 동영상 분류를 위해 AutoML 모델을 사용하는 방법을 보여줍니다.
Java
Video Intelligence에 인증하려면 애플리케이션 기본 사용자 인증 정보를 설정합니다.
자세한 내용은 로컬 개발 환경의 인증 설정을 참조하세요.
importcom.google.api.gax.rpc.BidiStream;importcom.google.cloud.videointelligence.v1p3beta1.LabelAnnotation;importcom.google.cloud.videointelligence.v1p3beta1.LabelFrame;importcom.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;importcom.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;importcom.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig;importcom.google.cloud.videointelligence.v1p3beta1.StreamingFeature;importcom.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;importcom.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;importcom.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;importcom.google.protobuf.ByteString;importio.grpc.StatusRuntimeException;importjava.io.IOException;importjava.nio.file.Files;importjava.nio.file.Path;importjava.nio.file.Paths;importjava.util.Arrays;importjava.util.concurrent.TimeoutException;classStreamingAutoMlClassification{// Perform streaming video classification with an AutoML ModelstaticvoidstreamingAutoMlClassification(StringfilePath,StringprojectId,StringmodelId)throwsTimeoutException,StatusRuntimeException,IOException{// String filePath = "path_to_your_video_file";// String projectId = "YOUR_GCP_PROJECT_ID";// String modelId = "YOUR_AUTO_ML_CLASSIFICATION_MODEL_ID";try(StreamingVideoIntelligenceServiceClientclient=StreamingVideoIntelligenceServiceClient.create()){Pathpath=Paths.get(filePath);byte[]data=Files.readAllBytes(path);// Set the chunk size to 5MB (recommended less than 
10MB).intchunkSize=5*1024*1024;intnumChunks=(int)Math.ceil((double)data.length/chunkSize);StringmodelPath=String.format("projects/%s/locations/us-central1/models/%s",projectId,modelId);System.out.println(modelPath);StreamingAutomlClassificationConfigstreamingAutomlClassificationConfig=StreamingAutomlClassificationConfig.newBuilder().setModelName(modelPath).build();StreamingVideoConfigstreamingVideoConfig=StreamingVideoConfig.newBuilder().setFeature(StreamingFeature.STREAMING_AUTOML_CLASSIFICATION).setAutomlClassificationConfig(streamingAutomlClassificationConfig).build();BidiStream<StreamingAnnotateVideoRequest,StreamingAnnotateVideoResponse>call=client.streamingAnnotateVideoCallable().call();// The first request must **only** contain the audio configuration:call.send(StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());// Subsequent requests must **only** contain the audio data.// Send the requests in chunksfor(inti=0;i < numChunks;i++){call.send(StreamingAnnotateVideoRequest.newBuilder().setInputContent(ByteString.copyFrom(Arrays.copyOfRange(data,i*chunkSize,i*chunkSize+chunkSize))).build());}// Tell the service you are done sending datacall.closeSend();for(StreamingAnnotateVideoResponseresponse:call){if(response.hasError()){System.out.println(response.getError().getMessage());break;}StreamingVideoAnnotationResultsannotationResults=response.getAnnotationResults();for(LabelAnnotationannotation:annotationResults.getLabelAnnotationsList()){Stringentity=annotation.getEntity().getDescription();// There is only one frame per annotationLabelFramelabelFrame=annotation.getFrames(0);doubleoffset=labelFrame.getTimeOffset().getSeconds()+labelFrame.getTimeOffset().getNanos()/1e9;floatconfidence=labelFrame.getConfidence();System.out.format("At %fs segment: %s (%f)\n",offset,entity,confidence);}}System.out.println("Video streamed successfully.");}}}
Node.js
Video Intelligence에 인증하려면 애플리케이션 기본 사용자 인증 정보를 설정합니다.
자세한 내용은 로컬 개발 환경의 인증 설정을 참조하세요.
/**
 * TODO(developer): Uncomment these variables before running the sample.
 */
// const path = 'Local file to analyze, e.g. ./my-file.mp4';
// const modelId = 'autoMl model'
// const projectId = 'Your GCP Project'

const {StreamingVideoIntelligenceServiceClient} =
  require('@google-cloud/video-intelligence').v1p3beta1;
const fs = require('fs');

// Instantiates a client
const client = new StreamingVideoIntelligenceServiceClient();

// Streaming configuration: route the stream to the AutoML classification model.
const modelPath = `projects/${projectId}/locations/us-central1/models/${modelId}`;
const configRequest = {
  videoConfig: {
    feature: 'STREAMING_AUTOML_CLASSIFICATION',
    automlClassificationConfig: {
      modelName: modelPath,
    },
  },
};

// Open the bidirectional streaming call and attach response/error handlers.
const stream = client
  .streamingAnnotateVideo()
  .on('data', response => {
    // Gets annotations for video
    const annotations = response.annotationResults;
    const labels = annotations.labelAnnotations;
    labels.forEach(label => {
      console.log(
        `Label ${label.entity.description} occurs at: ${
          label.frames[0].timeOffset.seconds || 0
        }` + `.${(label.frames[0].timeOffset.nanos / 1e6).toFixed(0)}s`
      );
      console.log(` Confidence: ${label.frames[0].confidence}`);
    });
  })
  .on('error', response => {
    console.error(response);
  });

// Read the input file in 5MB chunks (recommended less than 10MB),
// base64-encoded as required by inputContent.
const readStream = fs.createReadStream(path, {
  highWaterMark: 5 * 1024 * 1024,
  encoding: 'base64',
});

// Load file content
// Note: Input videos must have supported video codecs. See
// https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
// for more details.
const chunks = [];
readStream
  .on('data', chunk => {
    chunks.push({inputContent: chunk.toString()});
  })
  .on('close', () => {
    // configRequest should be the first in the stream of requests
    stream.write(configRequest);
    for (const request of chunks) {
      stream.write(request);
    }
    stream.end();
  });
Python
Video Intelligence에 인증하려면 애플리케이션 기본 사용자 인증 정보를 설정합니다.
자세한 내용은 로컬 개발 환경의 인증 설정을 참조하세요.
import io

from google.cloud import videointelligence_v1p3beta1 as videointelligence

# path = 'path_to_file'
# project_id = 'gcp_project_id'
# model_id = 'automl_classification_model_id'

client = videointelligence.StreamingVideoIntelligenceServiceClient()

model_path = "projects/{}/locations/us-central1/models/{}".format(project_id, model_id)

# Here we use classification as an example.
automl_config = videointelligence.StreamingAutomlClassificationConfig(
    model_name=model_path
)
video_config = videointelligence.StreamingVideoConfig(
    feature=videointelligence.StreamingFeature.STREAMING_AUTOML_CLASSIFICATION,
    automl_classification_config=automl_config,
)

# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
    video_config=video_config
)

# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024

# Load file content.
# Note: Input videos must have supported video codecs. See
# https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
# for more details.
with io.open(path, "rb") as video_file:
    # iter() with a b"" sentinel stops exactly where the original
    # "read until falsy" loop stopped: at end of file.
    chunks = list(iter(lambda: video_file.read(chunk_size), b""))


def request_generator():
    # The configuration request must precede all content requests.
    yield config_request
    for chunk in chunks:
        yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)


requests = request_generator()

# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the stream.
responses = client.streaming_annotate_video(requests, timeout=600)

for response in responses:
    # Check for errors.
    if response.error.message:
        print(response.error.message)
        break
    for label in response.annotation_results.label_annotations:
        for frame in label.frames:
            print(
                "At {:3d}s segment, {:5.1%}{}".format(
                    frame.time_offset.seconds,
                    frame.confidence,
                    label.entity.entity_id,
                )
            )