Get a batch prediction job

Use the get_batch_prediction_job method to get a batch prediction job.
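
If you use the Vertex AI SDK for Python, the same job can also be looked up through the high-level BatchPredictionJob class rather than the low-level JobServiceClient used in the samples below. The snippet that follows is a minimal sketch, assuming the google-cloud-aiplatform package is installed; YOUR_PROJECT_ID and YOUR_BATCH_PREDICTION_JOB_ID are placeholders to replace.

from google.cloud import aiplatform

# Assumed placeholder values; replace them before running.
project = "YOUR_PROJECT_ID"
location = "us-central1"
batch_prediction_job_id = "YOUR_BATCH_PREDICTION_JOB_ID"

aiplatform.init(project=project, location=location)

# Accepts either the job ID or the full resource name
# projects/{project}/locations/{location}/batchPredictionJobs/{job}.
job = aiplatform.BatchPredictionJob(
    batch_prediction_job_name=batch_prediction_job_id
)
print(job.display_name)
print(job.state)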

Code samples

Java

Before trying this sample, follow the Java setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Java API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.


import com.google.cloud.aiplatform.v1.BatchPredictionJob;
import com.google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig;
import com.google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig;
import com.google.cloud.aiplatform.v1.BatchPredictionJob.OutputInfo;
import com.google.cloud.aiplatform.v1.BatchPredictionJobName;
import com.google.cloud.aiplatform.v1.BigQueryDestination;
import com.google.cloud.aiplatform.v1.BigQuerySource;
import com.google.cloud.aiplatform.v1.CompletionStats;
import com.google.cloud.aiplatform.v1.GcsDestination;
import com.google.cloud.aiplatform.v1.GcsSource;
import com.google.cloud.aiplatform.v1.JobServiceClient;
import com.google.cloud.aiplatform.v1.JobServiceSettings;
import com.google.cloud.aiplatform.v1.ResourcesConsumed;
import com.google.protobuf.Any;
import com.google.rpc.Status;
import java.io.IOException;
import java.util.List;

public class GetBatchPredictionJobSample {

  public static void main(String[] args) throws IOException {
    // TODO(developer): Replace these variables before running the sample.
    String project = "YOUR_PROJECT_ID";
    String batchPredictionJobId = "YOUR_BATCH_PREDICTION_JOB_ID";
    getBatchPredictionJobSample(project, batchPredictionJobId);
  }

  static void getBatchPredictionJobSample(String project, String batchPredictionJobId)
      throws IOException {
    JobServiceSettings jobServiceSettings =
        JobServiceSettings.newBuilder()
            .setEndpoint("us-central1-aiplatform.googleapis.com:443")
            .build();

    // Initialize client that will be used to send requests. This client only needs to be created
    // once, and can be reused for multiple requests. After completing all of your requests, call
    // the "close" method on the client to safely clean up any remaining background resources.
    try (JobServiceClient jobServiceClient = JobServiceClient.create(jobServiceSettings)) {
      String location = "us-central1";
      BatchPredictionJobName batchPredictionJobName =
          BatchPredictionJobName.of(project, location, batchPredictionJobId);

      BatchPredictionJob batchPredictionJob =
          jobServiceClient.getBatchPredictionJob(batchPredictionJobName);

      System.out.println("Get Batch Prediction Job Response");
      System.out.format("\tName: %s\n", batchPredictionJob.getName());
      System.out.format("\tDisplay Name: %s\n", batchPredictionJob.getDisplayName());
      System.out.format("\tModel: %s\n", batchPredictionJob.getModel());

      System.out.format("\tModel Parameters: %s\n", batchPredictionJob.getModelParameters());
      System.out.format("\tState: %s\n", batchPredictionJob.getState());

      System.out.format("\tCreate Time: %s\n", batchPredictionJob.getCreateTime());
      System.out.format("\tStart Time: %s\n", batchPredictionJob.getStartTime());
      System.out.format("\tEnd Time: %s\n", batchPredictionJob.getEndTime());
      System.out.format("\tUpdate Time: %s\n", batchPredictionJob.getUpdateTime());
      System.out.format("\tLabels: %s\n", batchPredictionJob.getLabelsMap());

      InputConfig inputConfig = batchPredictionJob.getInputConfig();
      System.out.println("\tInput Config");
      System.out.format("\t\tInstances Format: %s\n", inputConfig.getInstancesFormat());

      GcsSource gcsSource = inputConfig.getGcsSource();
      System.out.println("\t\tGcs Source");
      System.out.format("\t\t\tUris: %s\n", gcsSource.getUrisList());

      BigQuerySource bigquerySource = inputConfig.getBigquerySource();
      System.out.println("\t\tBigquery Source");
      System.out.format("\t\t\tInput Uri: %s\n", bigquerySource.getInputUri());

      OutputConfig outputConfig = batchPredictionJob.getOutputConfig();
      System.out.println("\tOutput Config");
      System.out.format("\t\tPredictions Format: %s\n", outputConfig.getPredictionsFormat());

      GcsDestination gcsDestination = outputConfig.getGcsDestination();
      System.out.println("\t\tGcs Destination");
      System.out.format("\t\t\tOutput Uri Prefix: %s\n", gcsDestination.getOutputUriPrefix());

      BigQueryDestination bigqueryDestination = outputConfig.getBigqueryDestination();
      System.out.println("\t\tBigquery Destination");
      System.out.format("\t\t\tOutput Uri: %s\n", bigqueryDestination.getOutputUri());

      OutputInfo outputInfo = batchPredictionJob.getOutputInfo();
      System.out.println("\tOutput Info");
      System.out.format("\t\tGcs Output Directory: %s\n", outputInfo.getGcsOutputDirectory());
      System.out.format("\t\tBigquery Output Dataset: %s\n", outputInfo.getBigqueryOutputDataset());

      Status status = batchPredictionJob.getError();
      System.out.println("\tError");
      System.out.format("\t\tCode: %s\n", status.getCode());
      System.out.format("\t\tMessage: %s\n", status.getMessage());

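      // Structured error details, if any, are available as a list of Any protos.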
      List<Any> detailsList = status.getDetailsList();

      for (Status partialFailure : batchPredictionJob.getPartialFailuresList()) {
        System.out.println("\tPartial Failure");
        System.out.format("\t\tCode: %s\n", partialFailure.getCode());
        System.out.format("\t\tMessage: %s\n", partialFailure.getMessage());
        List<Any> details = partialFailure.getDetailsList();
      }

      ResourcesConsumed resourcesConsumed = batchPredictionJob.getResourcesConsumed();
      System.out.println("\tResources Consumed");
      System.out.format("\t\tReplica Hours: %s\n", resourcesConsumed.getReplicaHours());

      CompletionStats completionStats = batchPredictionJob.getCompletionStats();
      System.out.println("\tCompletion Stats");
      System.out.format("\t\tSuccessful Count: %s\n", completionStats.getSuccessfulCount());
      System.out.format("\t\tFailed Count: %s\n", completionStats.getFailedCount());
      System.out.format("\t\tIncomplete Count: %s\n", completionStats.getIncompleteCount());
    }
  }
}

Node.js

Before trying this sample, follow the Node.js setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Node.js API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

/**
 * TODO(developer): Uncomment these variables before running the sample.
 * (Not necessary if passing values as arguments)
 */

// const batchPredictionJobId = 'YOUR_BATCH_PREDICTION_JOB_ID';
// const project = 'YOUR_PROJECT_ID';
// const location = 'YOUR_PROJECT_LOCATION';

// Imports the Google Cloud Job Service Client library
const {JobServiceClient} = require('@google-cloud/aiplatform');

// Specifies the location of the api endpoint
const clientOptions = {
  apiEndpoint: 'us-central1-aiplatform.googleapis.com',
};

// Instantiates a client
const jobServiceClient = new JobServiceClient(clientOptions);

async function getBatchPredictionJob() {
  // Configure the parent resource
  const name = `projects/${project}/locations/${location}/batchPredictionJobs/${batchPredictionJobId}`;
  const request = {
    name,
  };

  // Get batch prediction request
  const [response] = await jobServiceClient.getBatchPredictionJob(request);

  console.log('Get batch prediction job response');
  console.log(`\tName : ${response.name}`);
  console.log(`\tDisplayName : ${response.displayName}`);
  console.log(`\tModel : ${response.model}`);
  console.log(`\tModel parameters : ${response.modelParameters}`);
  console.log(`\tGenerate explanation : ${response.generateExplanation}`);
  console.log(`\tState : ${response.state}`);
  console.log(`\tCreate Time : ${JSON.stringify(response.createTime)}`);
  console.log(`\tStart Time : ${JSON.stringify(response.startTime)}`);
  console.log(`\tEnd Time : ${JSON.stringify(response.endTime)}`);
  console.log(`\tUpdate Time : ${JSON.stringify(response.updateTime)}`);
  console.log(`\tLabels : ${JSON.stringify(response.labels)}`);

  const inputConfig = response.inputConfig;
  console.log('\tInput config');
  console.log(`\t\tInstances format : ${inputConfig.instancesFormat}`);

  const gcsSource = inputConfig.gcsSource;
  console.log('\t\tGcs source');
  if (!gcsSource) {
    console.log('\t\t\tUris : {}');
  } else {
    console.log(`\t\t\tUris : ${gcsSource.uris}`);
  }

  const bigquerySource = inputConfig.bigquerySource;
  console.log('\t\tBigQuery Source');
  if (!bigquerySource) {
    console.log('\t\t\tInput Uri : {}');
  } else {
    console.log(`\t\t\tInput Uri : ${bigquerySource.inputUri}`);
  }

  const outputConfig = response.outputConfig;
  console.log('\tOutput config');
  console.log(`\t\tPredictions format : ${outputConfig.predictionsFormat}`);

  const gcsDestination = outputConfig.gcsDestination;
  console.log('\t\tGcs Destination');
  if (!gcsDestination) {
    console.log('\t\t\tOutput uri prefix : {}');
  } else {
    console.log(`\t\t\tOutput uri prefix : ${gcsDestination.outputUriPrefix}`);
  }

  const bigqueryDestination = outputConfig.bigqueryDestination;
  if (!bigqueryDestination) {
    console.log('\t\tBigquery Destination');
    console.log('\t\t\tOutput uri : {}');
  } else {
    console.log('\t\tBigquery Destination');
    console.log(`\t\t\tOutput uri : ${bigqueryDestination.outputUri}`);
  }

  const outputInfo = response.outputInfo;
  if (!outputInfo) {
    console.log('\tOutput info');
    console.log('\t\tGcs output directory : {}');
    console.log('\t\tBigquery_output_dataset : {}');
  } else {
    console.log('\tOutput info');
    console.log(
      `\t\tGcs output directory : ${outputInfo.gcsOutputDirectory}`
    );
    console.log(
      `\t\tBigquery_output_dataset : ${outputInfo.bigqueryOutputDataset}`
    );
  }

  const error = response.error;
  console.log('\tError');
  console.log(`\t\tCode : ${error.code}`);
  console.log(`\t\tMessage : ${error.message}`);

  const details = error.details;
  console.log(`\t\tDetails : ${details}`);

  const partialFailures = response.partialFailures;
  console.log('\tPartial failure');
  console.log(partialFailures);

  const resourcesConsumed = response.resourcesConsumed;
  console.log('\tResource consumed');
  if (!resourcesConsumed) {
    console.log('\t\tReplica Hours: {}');
  } else {
    console.log(`\t\tReplica Hours: ${resourcesConsumed.replicaHours}`);
  }

  const completionStats = response.completionStats;
  console.log('\tCompletion status');
  if (!completionStats) {
    console.log('\t\tSuccessful count: {}');
    console.log('\t\tFailed count: {}');
    console.log('\t\tIncomplete count: {}');
  } else {
    console.log(`\t\tSuccessful count: ${completionStats.successfulCount}`);
    console.log(`\t\tFailed count: ${completionStats.failedCount}`);
    console.log(`\t\tIncomplete count: ${completionStats.incompleteCount}`);
  }
}
getBatchPredictionJob();

Python

Before trying this sample, follow the Python setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Python API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

from google.cloud import aiplatform


def get_batch_prediction_job_sample(
    project: str,
    batch_prediction_job_id: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    # The AI Platform services require regional API endpoints.
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.JobServiceClient(client_options=client_options)
    name = client.batch_prediction_job_path(
        project=project, location=location, batch_prediction_job=batch_prediction_job_id
    )
    response = client.get_batch_prediction_job(name=name)
    print("response:", response)

What's next

To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.