Create a training pipeline for image classification by using the create_training_pipeline method.
Code samples
Java
Before trying this sample, follow the Java setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Java API reference documentation.
To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.cloud.aiplatform.util.ValueConverter;
import com.google.cloud.aiplatform.v1.DeployedModelRef;
import com.google.cloud.aiplatform.v1.EnvVar;
import com.google.cloud.aiplatform.v1.FilterSplit;
import com.google.cloud.aiplatform.v1.FractionSplit;
import com.google.cloud.aiplatform.v1.InputDataConfig;
import com.google.cloud.aiplatform.v1.LocationName;
import com.google.cloud.aiplatform.v1.Model;
import com.google.cloud.aiplatform.v1.Model.ExportFormat;
import com.google.cloud.aiplatform.v1.ModelContainerSpec;
import com.google.cloud.aiplatform.v1.PipelineServiceClient;
import com.google.cloud.aiplatform.v1.PipelineServiceSettings;
import com.google.cloud.aiplatform.v1.Port;
import com.google.cloud.aiplatform.v1.PredefinedSplit;
import com.google.cloud.aiplatform.v1.PredictSchemata;
import com.google.cloud.aiplatform.v1.TimestampSplit;
import com.google.cloud.aiplatform.v1.TrainingPipeline;
import com.google.cloud.aiplatform.v1.schema.trainingjob.definition.AutoMlImageClassificationInputs;
import com.google.cloud.aiplatform.v1.schema.trainingjob.definition.AutoMlImageClassificationInputs.ModelType;
import com.google.rpc.Status;
import java.io.IOException;
public class CreateTrainingPipelineImageClassificationSample {
  public static void main(String[] args) throws IOException {
    // TODO(developer): Replace these variables before running the sample.
    String trainingPipelineDisplayName = "YOUR_TRAINING_PIPELINE_DISPLAY_NAME";
    String project = "YOUR_PROJECT_ID";
    String datasetId = "YOUR_DATASET_ID";
    String modelDisplayName = "YOUR_MODEL_DISPLAY_NAME";
    createTrainingPipelineImageClassificationSample(
        project, trainingPipelineDisplayName, datasetId, modelDisplayName);
  }

  static void createTrainingPipelineImageClassificationSample(
      String project, String trainingPipelineDisplayName, String datasetId, String modelDisplayName)
      throws IOException {
    PipelineServiceSettings pipelineServiceSettings =
        PipelineServiceSettings.newBuilder()
            .setEndpoint("us-central1-aiplatform.googleapis.com:443")
            .build();

    // Initialize client that will be used to send requests. This client only needs to be created
    // once, and can be reused for multiple requests. After completing all of your requests, call
    // the "close" method on the client to safely clean up any remaining background resources.
    try (PipelineServiceClient pipelineServiceClient =
        PipelineServiceClient.create(pipelineServiceSettings)) {
      String location = "us-central1";
      String trainingTaskDefinition =
          "gs://google-cloud-aiplatform/schema/trainingjob/definition/"
              + "automl_image_classification_1.0.0.yaml";
      LocationName locationName = LocationName.of(project, location);

      AutoMlImageClassificationInputs autoMlImageClassificationInputs =
          AutoMlImageClassificationInputs.newBuilder()
              .setModelType(ModelType.CLOUD)
              .setMultiLabel(false)
              .setBudgetMilliNodeHours(8000)
              .setDisableEarlyStopping(false)
              .build();

      InputDataConfig trainingInputDataConfig =
          InputDataConfig.newBuilder().setDatasetId(datasetId).build();
      Model model = Model.newBuilder().setDisplayName(modelDisplayName).build();
      TrainingPipeline trainingPipeline =
          TrainingPipeline.newBuilder()
              .setDisplayName(trainingPipelineDisplayName)
              .setTrainingTaskDefinition(trainingTaskDefinition)
              .setTrainingTaskInputs(ValueConverter.toValue(autoMlImageClassificationInputs))
              .setInputDataConfig(trainingInputDataConfig)
              .setModelToUpload(model)
              .build();

      TrainingPipeline trainingPipelineResponse =
          pipelineServiceClient.createTrainingPipeline(locationName, trainingPipeline);

      System.out.println("Create Training Pipeline Image Classification Response");
      System.out.format("Name: %s\n", trainingPipelineResponse.getName());
      System.out.format("Display Name: %s\n", trainingPipelineResponse.getDisplayName());
      System.out.format(
          "Training Task Definition: %s\n", trainingPipelineResponse.getTrainingTaskDefinition());
      System.out.format(
          "Training Task Inputs: %s\n", trainingPipelineResponse.getTrainingTaskInputs());
      System.out.format(
          "Training Task Metadata: %s\n", trainingPipelineResponse.getTrainingTaskMetadata());
      System.out.format("State: %s\n", trainingPipelineResponse.getState());
      System.out.format("Create Time: %s\n", trainingPipelineResponse.getCreateTime());
      System.out.format("Start Time: %s\n", trainingPipelineResponse.getStartTime());
      System.out.format("End Time: %s\n", trainingPipelineResponse.getEndTime());
      System.out.format("Update Time: %s\n", trainingPipelineResponse.getUpdateTime());
      System.out.format("Labels: %s\n", trainingPipelineResponse.getLabelsMap());

      InputDataConfig inputDataConfig = trainingPipelineResponse.getInputDataConfig();
      System.out.println("Input Data Config");
      System.out.format("Dataset Id: %s\n", inputDataConfig.getDatasetId());
      System.out.format("Annotations Filter: %s\n", inputDataConfig.getAnnotationsFilter());

      FractionSplit fractionSplit = inputDataConfig.getFractionSplit();
      System.out.println("Fraction Split");
      System.out.format("Training Fraction: %s\n", fractionSplit.getTrainingFraction());
      System.out.format("Validation Fraction: %s\n", fractionSplit.getValidationFraction());
      System.out.format("Test Fraction: %s\n", fractionSplit.getTestFraction());

      FilterSplit filterSplit = inputDataConfig.getFilterSplit();
      System.out.println("Filter Split");
      System.out.format("Training Filter: %s\n", filterSplit.getTrainingFilter());
      System.out.format("Validation Filter: %s\n", filterSplit.getValidationFilter());
      System.out.format("Test Filter: %s\n", filterSplit.getTestFilter());

      PredefinedSplit predefinedSplit = inputDataConfig.getPredefinedSplit();
      System.out.println("Predefined Split");
      System.out.format("Key: %s\n", predefinedSplit.getKey());

      TimestampSplit timestampSplit = inputDataConfig.getTimestampSplit();
      System.out.println("Timestamp Split");
      System.out.format("Training Fraction: %s\n", timestampSplit.getTrainingFraction());
      System.out.format("Validation Fraction: %s\n", timestampSplit.getValidationFraction());
      System.out.format("Test Fraction: %s\n", timestampSplit.getTestFraction());
      System.out.format("Key: %s\n", timestampSplit.getKey());

      Model modelResponse = trainingPipelineResponse.getModelToUpload();
      System.out.println("Model To Upload");
      System.out.format("Name: %s\n", modelResponse.getName());
      System.out.format("Display Name: %s\n", modelResponse.getDisplayName());
      System.out.format("Description: %s\n", modelResponse.getDescription());
      System.out.format("Metadata Schema Uri: %s\n", modelResponse.getMetadataSchemaUri());
      System.out.format("Metadata: %s\n", modelResponse.getMetadata());
      System.out.format("Training Pipeline: %s\n", modelResponse.getTrainingPipeline());
      System.out.format("Artifact Uri: %s\n", modelResponse.getArtifactUri());
      System.out.format(
          "Supported Deployment Resources Types: %s\n",
          modelResponse.getSupportedDeploymentResourcesTypesList());
      System.out.format(
          "Supported Input Storage Formats: %s\n",
          modelResponse.getSupportedInputStorageFormatsList());
      System.out.format(
          "Supported Output Storage Formats: %s\n",
          modelResponse.getSupportedOutputStorageFormatsList());
      System.out.format("Create Time: %s\n", modelResponse.getCreateTime());
      System.out.format("Update Time: %s\n", modelResponse.getUpdateTime());
      System.out.format("Labels: %s\n", modelResponse.getLabelsMap());

      PredictSchemata predictSchemata = modelResponse.getPredictSchemata();
      System.out.println("Predict Schemata");
      System.out.format("Instance Schema Uri: %s\n", predictSchemata.getInstanceSchemaUri());
      System.out.format("Parameters Schema Uri: %s\n", predictSchemata.getParametersSchemaUri());
      System.out.format("Prediction Schema Uri: %s\n", predictSchemata.getPredictionSchemaUri());

      for (ExportFormat exportFormat : modelResponse.getSupportedExportFormatsList()) {
        System.out.println("Supported Export Format");
        System.out.format("Id: %s\n", exportFormat.getId());
      }

      ModelContainerSpec modelContainerSpec = modelResponse.getContainerSpec();
      System.out.println("Container Spec");
      System.out.format("Image Uri: %s\n", modelContainerSpec.getImageUri());
      System.out.format("Command: %s\n", modelContainerSpec.getCommandList());
      System.out.format("Args: %s\n", modelContainerSpec.getArgsList());
      System.out.format("Predict Route: %s\n", modelContainerSpec.getPredictRoute());
      System.out.format("Health Route: %s\n", modelContainerSpec.getHealthRoute());

      for (EnvVar envVar : modelContainerSpec.getEnvList()) {
        System.out.println("Env");
        System.out.format("Name: %s\n", envVar.getName());
        System.out.format("Value: %s\n", envVar.getValue());
      }

      for (Port port : modelContainerSpec.getPortsList()) {
        System.out.println("Port");
        System.out.format("Container Port: %s\n", port.getContainerPort());
      }

      for (DeployedModelRef deployedModelRef : modelResponse.getDeployedModelsList()) {
        System.out.println("Deployed Model");
        System.out.format("Endpoint: %s\n", deployedModelRef.getEndpoint());
        System.out.format("Deployed Model Id: %s\n", deployedModelRef.getDeployedModelId());
      }

      Status status = trainingPipelineResponse.getError();
      System.out.println("Error");
      System.out.format("Code: %s\n", status.getCode());
      System.out.format("Message: %s\n", status.getMessage());
    }
  }
}
Node.js
Before trying this sample, follow the Node.js setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Node.js API reference documentation.
To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
 * TODO(developer): Uncomment these variables before running the sample.
 * (Not necessary if passing values as arguments)
 */

/*
const datasetId = 'YOUR DATASET';
const modelDisplayName = 'NEW MODEL NAME';
const trainingPipelineDisplayName = 'NAME FOR TRAINING PIPELINE';
const project = 'YOUR PROJECT ID';
const location = 'us-central1';
*/

// Imports the Google Cloud Pipeline Service Client library
const aiplatform = require('@google-cloud/aiplatform');
const {definition} =
  aiplatform.protos.google.cloud.aiplatform.v1.schema.trainingjob;
const ModelType = definition.AutoMlImageClassificationInputs.ModelType;

// Specifies the location of the api endpoint
const clientOptions = {
  apiEndpoint: 'us-central1-aiplatform.googleapis.com',
};

// Instantiates a client
const {PipelineServiceClient} = aiplatform.v1;
const pipelineServiceClient = new PipelineServiceClient(clientOptions);

async function createTrainingPipelineImageClassification() {
  // Configure the parent resource
  const parent = `projects/${project}/locations/${location}`;

  // Values should match the input expected by your model.
  const trainingTaskInputsMessage =
    new definition.AutoMlImageClassificationInputs({
      multiLabel: true,
      modelType: ModelType.CLOUD,
      budgetMilliNodeHours: 8000,
      disableEarlyStopping: false,
    });
  const trainingTaskInputs = trainingTaskInputsMessage.toValue();

  const trainingTaskDefinition =
    'gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_image_classification_1.0.0.yaml';
  const modelToUpload = {displayName: modelDisplayName};
  const inputDataConfig = {datasetId};

  const trainingPipeline = {
    displayName: trainingPipelineDisplayName,
    trainingTaskDefinition,
    trainingTaskInputs,
    inputDataConfig,
    modelToUpload,
  };
  const request = {parent, trainingPipeline};

  // Create training pipeline request
  const [response] =
    await pipelineServiceClient.createTrainingPipeline(request);

  console.log('Create training pipeline image classification response');
  console.log(`Name : ${response.name}`);
  console.log('Raw response:');
  console.log(JSON.stringify(response, null, 2));
}

createTrainingPipelineImageClassification();
Python
Before trying this sample, follow the Python setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Python API reference documentation.
To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
from google.cloud import aiplatform
from google.cloud.aiplatform.gapic.schema import trainingjob

def create_training_pipeline_image_classification_sample(
    project: str,
    display_name: str,
    dataset_id: str,
    model_display_name: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    # The AI Platform services require regional API endpoints.
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
    training_task_inputs = trainingjob.definition.AutoMlImageClassificationInputs(
        multi_label=True,
        model_type="CLOUD",
        budget_milli_node_hours=8000,
        disable_early_stopping=False,
    ).to_value()

    training_pipeline = {
        "display_name": display_name,
        "training_task_definition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_image_classification_1.0.0.yaml",
        "training_task_inputs": training_task_inputs,
        "input_data_config": {"dataset_id": dataset_id},
        "model_to_upload": {"display_name": model_display_name},
    }
    parent = f"projects/{project}/locations/{location}"
    response = client.create_training_pipeline(
        parent=parent, training_pipeline=training_pipeline
    )
    print("response:", response)
What's next
To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.