public final class Model extends GeneratedMessageV3 implements ModelOrBuilder
A trained machine learning Model.
Protobuf type google.cloud.aiplatform.v1beta1.Model
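A minimal sketch of assembling a Model message with its generated builder. The field values are placeholders, and the setter and put methods used are the standard protobuf-generated counterparts of the getters documented below.

```java
import com.google.cloud.aiplatform.v1beta1.Model;

public class BuildModelExample {
  public static void main(String[] args) {
    // Assemble a Model message locally; the values are placeholders, not a
    // working Vertex AI configuration.
    Model model =
        Model.newBuilder()
            .setDisplayName("my-model")                        // required display_name
            .setDescription("Example model description")
            .setArtifactUri("gs://my-bucket/model-artifacts")  // hypothetical GCS path
            .putLabels("team", "research")                     // map<string, string> labels
            .build();

    System.out.println(model.getDisplayName());
  }
}
```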
Inheritance
Object > AbstractMessageLite<MessageType,BuilderType> > AbstractMessage > GeneratedMessageV3 > Model
Implements
ModelOrBuilder
Static Fields
ARTIFACT_URI_FIELD_NUMBER
public static final int ARTIFACT_URI_FIELD_NUMBER
Type | Description |
int |
CONTAINER_SPEC_FIELD_NUMBER
public static final int CONTAINER_SPEC_FIELD_NUMBER
Type | Description |
int |
CREATE_TIME_FIELD_NUMBER
public static final int CREATE_TIME_FIELD_NUMBER
Type | Description |
int |
DEPLOYED_MODELS_FIELD_NUMBER
public static final int DEPLOYED_MODELS_FIELD_NUMBER
Type | Description |
int |
DESCRIPTION_FIELD_NUMBER
public static final int DESCRIPTION_FIELD_NUMBER
Type | Description |
int |
DISPLAY_NAME_FIELD_NUMBER
public static final int DISPLAY_NAME_FIELD_NUMBER
Type | Description |
int |
ENCRYPTION_SPEC_FIELD_NUMBER
public static final int ENCRYPTION_SPEC_FIELD_NUMBER
Type | Description |
int |
ETAG_FIELD_NUMBER
public static final int ETAG_FIELD_NUMBER
Type | Description |
int |
EXPLANATION_SPEC_FIELD_NUMBER
public static final int EXPLANATION_SPEC_FIELD_NUMBER
Type | Description |
int |
LABELS_FIELD_NUMBER
public static final int LABELS_FIELD_NUMBER
Type | Description |
int |
METADATA_FIELD_NUMBER
public static final int METADATA_FIELD_NUMBER
Type | Description |
int |
METADATA_SCHEMA_URI_FIELD_NUMBER
public static final int METADATA_SCHEMA_URI_FIELD_NUMBER
Type | Description |
int |
NAME_FIELD_NUMBER
public static final int NAME_FIELD_NUMBER
Type | Description |
int |
PREDICT_SCHEMATA_FIELD_NUMBER
public static final int PREDICT_SCHEMATA_FIELD_NUMBER
Type | Description |
int |
SUPPORTED_DEPLOYMENT_RESOURCES_TYPES_FIELD_NUMBER
public static final int SUPPORTED_DEPLOYMENT_RESOURCES_TYPES_FIELD_NUMBER
Type | Description |
int |
SUPPORTED_EXPORT_FORMATS_FIELD_NUMBER
public static final int SUPPORTED_EXPORT_FORMATS_FIELD_NUMBER
Type | Description |
int |
SUPPORTED_INPUT_STORAGE_FORMATS_FIELD_NUMBER
public static final int SUPPORTED_INPUT_STORAGE_FORMATS_FIELD_NUMBER
Type | Description |
int |
SUPPORTED_OUTPUT_STORAGE_FORMATS_FIELD_NUMBER
public static final int SUPPORTED_OUTPUT_STORAGE_FORMATS_FIELD_NUMBER
Type | Description |
int |
TRAINING_PIPELINE_FIELD_NUMBER
public static final int TRAINING_PIPELINE_FIELD_NUMBER
Type | Description |
int |
UPDATE_TIME_FIELD_NUMBER
public static final int UPDATE_TIME_FIELD_NUMBER
Type | Description |
int |
Static Methods
getDefaultInstance()
public static Model getDefaultInstance()
Type | Description |
Model |
getDescriptor()
public static final Descriptors.Descriptor getDescriptor()
Type | Description |
Descriptor |
newBuilder()
public static Model.Builder newBuilder()
Type | Description |
Model.Builder |
newBuilder(Model prototype)
public static Model.Builder newBuilder(Model prototype)
Name | Description |
prototype | Model |
Type | Description |
Model.Builder |
parseDelimitedFrom(InputStream input)
public static Model parseDelimitedFrom(InputStream input)
Name | Description |
input | InputStream |
Type | Description |
Model |
Type | Description |
IOException |
parseDelimitedFrom(InputStream input, ExtensionRegistryLite extensionRegistry)
public static Model parseDelimitedFrom(InputStream input, ExtensionRegistryLite extensionRegistry)
Name | Description |
input | InputStream |
extensionRegistry | ExtensionRegistryLite |
Type | Description |
Model |
Type | Description |
IOException |
parseFrom(byte[] data)
public static Model parseFrom(byte[] data)
Name | Description |
data | byte[] |
Type | Description |
Model |
Type | Description |
InvalidProtocolBufferException |
parseFrom(byte[] data, ExtensionRegistryLite extensionRegistry)
public static Model parseFrom(byte[] data, ExtensionRegistryLite extensionRegistry)
Name | Description |
data | byte[] |
extensionRegistry | ExtensionRegistryLite |
Type | Description |
Model |
Type | Description |
InvalidProtocolBufferException |
parseFrom(ByteString data)
public static Model parseFrom(ByteString data)
Name | Description |
data | ByteString |
Type | Description |
Model |
Type | Description |
InvalidProtocolBufferException |
parseFrom(ByteString data, ExtensionRegistryLite extensionRegistry)
public static Model parseFrom(ByteString data, ExtensionRegistryLite extensionRegistry)
Name | Description |
data | ByteString |
extensionRegistry | ExtensionRegistryLite |
Type | Description |
Model |
Type | Description |
InvalidProtocolBufferException |
parseFrom(CodedInputStream input)
public static Model parseFrom(CodedInputStream input)
Name | Description |
input | CodedInputStream |
Type | Description |
Model |
Type | Description |
IOException |
parseFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)
public static Model parseFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)
Name | Description |
input | CodedInputStream |
extensionRegistry | ExtensionRegistryLite |
Type | Description |
Model |
Type | Description |
IOException |
parseFrom(InputStream input)
public static Model parseFrom(InputStream input)
Name | Description |
input | InputStream |
Type | Description |
Model |
Type | Description |
IOException |
parseFrom(InputStream input, ExtensionRegistryLite extensionRegistry)
public static Model parseFrom(InputStream input, ExtensionRegistryLite extensionRegistry)
Name | Description |
input | InputStream |
extensionRegistry | ExtensionRegistryLite |
Type | Description |
Model |
Type | Description |
IOException |
parseFrom(ByteBuffer data)
public static Model parseFrom(ByteBuffer data)
Name | Description |
data | ByteBuffer |
Type | Description |
Model |
Type | Description |
InvalidProtocolBufferException |
parseFrom(ByteBuffer data, ExtensionRegistryLite extensionRegistry)
public static Model parseFrom(ByteBuffer data, ExtensionRegistryLite extensionRegistry)
Name | Description |
data | ByteBuffer |
extensionRegistry | ExtensionRegistryLite |
Type | Description |
Model |
Type | Description |
InvalidProtocolBufferException |
parser()
public static Parser<Model> parser()
Type | Description |
Parser<Model> |
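The parse methods above accept the usual protobuf wire-format sources (byte arrays, ByteString, streams). A minimal round-trip sketch, assuming toByteArray() inherited from the protobuf base classes:

```java
import com.google.cloud.aiplatform.v1beta1.Model;
import com.google.protobuf.InvalidProtocolBufferException;

public class ParseModelExample {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    // Serialize a Model to its wire format and parse it back.
    Model original = Model.newBuilder().setDisplayName("my-model").build();
    byte[] wireBytes = original.toByteArray();

    Model parsed = Model.parseFrom(wireBytes); // throws InvalidProtocolBufferException on malformed input
    System.out.println(parsed.getDisplayName()); // "my-model"
  }
}
```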
Methods
containsLabels(String key)
public boolean containsLabels(String key)
The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
map<string, string> labels = 17;
Name | Description |
key | String |
Type | Description |
boolean |
equals(Object obj)
public boolean equals(Object obj)
Name | Description |
obj | Object |
Type | Description |
boolean |
getArtifactUri()
public String getArtifactUri()
Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models.
string artifact_uri = 26 [(.google.api.field_behavior) = IMMUTABLE];
Type | Description |
String | The artifactUri. |
getArtifactUriBytes()
public ByteString getArtifactUriBytes()
Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models.
string artifact_uri = 26 [(.google.api.field_behavior) = IMMUTABLE];
Type | Description |
ByteString | The bytes for artifactUri. |
getContainerSpec()
public ModelContainerSpec getContainerSpec()
Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models.
.google.cloud.aiplatform.v1beta1.ModelContainerSpec container_spec = 9 [(.google.api.field_behavior) = INPUT_ONLY];
Type | Description |
ModelContainerSpec | The containerSpec. |
getContainerSpecOrBuilder()
public ModelContainerSpecOrBuilder getContainerSpecOrBuilder()
Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models.
.google.cloud.aiplatform.v1beta1.ModelContainerSpec container_spec = 9 [(.google.api.field_behavior) = INPUT_ONLY];
Type | Description |
ModelContainerSpecOrBuilder |
getCreateTime()
public Timestamp getCreateTime()
Output only. Timestamp when this Model was uploaded into Vertex AI.
.google.protobuf.Timestamp create_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
Timestamp | The createTime. |
getCreateTimeOrBuilder()
public TimestampOrBuilder getCreateTimeOrBuilder()
Output only. Timestamp when this Model was uploaded into Vertex AI.
.google.protobuf.Timestamp create_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
TimestampOrBuilder |
getDefaultInstanceForType()
public Model getDefaultInstanceForType()
Type | Description |
Model |
getDeployedModels(int index)
public DeployedModelRef getDeployedModels(int index)
Output only. The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations.
repeated .google.cloud.aiplatform.v1beta1.DeployedModelRef deployed_models = 15 [(.google.api.field_behavior) = OUTPUT_ONLY];
Name | Description |
index | int |
Type | Description |
DeployedModelRef |
getDeployedModelsCount()
public int getDeployedModelsCount()
Output only. The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations.
repeated .google.cloud.aiplatform.v1beta1.DeployedModelRef deployed_models = 15 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
int |
getDeployedModelsList()
public List<DeployedModelRef> getDeployedModelsList()
Output only. The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations.
repeated .google.cloud.aiplatform.v1beta1.DeployedModelRef deployed_models = 15 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
List<DeployedModelRef> |
getDeployedModelsOrBuilder(int index)
public DeployedModelRefOrBuilder getDeployedModelsOrBuilder(int index)
Output only. The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations.
repeated .google.cloud.aiplatform.v1beta1.DeployedModelRef deployed_models = 15 [(.google.api.field_behavior) = OUTPUT_ONLY];
Name | Description |
index | int |
Type | Description |
DeployedModelRefOrBuilder |
getDeployedModelsOrBuilderList()
public List<? extends DeployedModelRefOrBuilder> getDeployedModelsOrBuilderList()
Output only. The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations.
repeated .google.cloud.aiplatform.v1beta1.DeployedModelRef deployed_models = 15 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
List<? extends com.google.cloud.aiplatform.v1beta1.DeployedModelRefOrBuilder> |
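A short sketch of iterating the deployed-model references returned by the accessors above. It assumes the Model instance was fetched elsewhere (for example via ModelServiceClient.getModel) and that DeployedModelRef exposes getEndpoint() and getDeployedModelId() for its proto fields.

```java
import com.google.cloud.aiplatform.v1beta1.DeployedModelRef;
import com.google.cloud.aiplatform.v1beta1.Model;

public class ListDeployedModelsExample {
  // Prints every Endpoint this Model has been deployed to.
  static void printDeployments(Model model) {
    if (model.getDeployedModelsCount() == 0) {
      System.out.println("Model is not deployed to any Endpoint.");
      return;
    }
    for (DeployedModelRef ref : model.getDeployedModelsList()) {
      // getEndpoint()/getDeployedModelId() are assumed accessors of DeployedModelRef.
      System.out.println("Endpoint: " + ref.getEndpoint()
          + ", deployed model id: " + ref.getDeployedModelId());
    }
  }
}
```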
getDescription()
public String getDescription()
The description of the Model.
string description = 3;
Type | Description |
String | The description. |
getDescriptionBytes()
public ByteString getDescriptionBytes()
The description of the Model.
string description = 3;
Type | Description |
ByteString | The bytes for description. |
getDisplayName()
public String getDisplayName()
Required. The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters.
string display_name = 2 [(.google.api.field_behavior) = REQUIRED];
Type | Description |
String | The displayName. |
getDisplayNameBytes()
public ByteString getDisplayNameBytes()
Required. The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters.
string display_name = 2 [(.google.api.field_behavior) = REQUIRED];
Type | Description |
ByteString | The bytes for displayName. |
getEncryptionSpec()
public EncryptionSpec getEncryptionSpec()
Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key.
.google.cloud.aiplatform.v1beta1.EncryptionSpec encryption_spec = 24;
Type | Description |
EncryptionSpec | The encryptionSpec. |
getEncryptionSpecOrBuilder()
public EncryptionSpecOrBuilder getEncryptionSpecOrBuilder()
Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key.
.google.cloud.aiplatform.v1beta1.EncryptionSpec encryption_spec = 24;
Type | Description |
EncryptionSpecOrBuilder |
getEtag()
public String getEtag()
Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.
string etag = 16;
Type | Description |
String | The etag. |
getEtagBytes()
public ByteString getEtagBytes()
Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.
string etag = 16;
Type | Description |
ByteString | The bytes for etag. |
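A sketch of how the etag can be carried into a read-modify-write update, per the field description above. The actual UpdateModel RPC belongs to ModelService and is not shown; this only builds the update payload.

```java
import com.google.cloud.aiplatform.v1beta1.Model;

public class EtagUpdateExample {
  // Builds an update payload that carries the resource name and the etag read
  // from the server, so the service can reject the write if the Model changed
  // in the meantime (otherwise a blind "overwrite" update happens).
  static Model buildUpdate(Model current, String newDescription) {
    return Model.newBuilder()
        .setName(current.getName())
        .setDescription(newDescription)
        .setEtag(current.getEtag())
        .build();
  }
}
```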
getExplanationSpec()
public ExplanationSpec getExplanationSpec()
The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob.
.google.cloud.aiplatform.v1beta1.ExplanationSpec explanation_spec = 23;
Type | Description |
ExplanationSpec | The explanationSpec. |
getExplanationSpecOrBuilder()
public ExplanationSpecOrBuilder getExplanationSpecOrBuilder()
The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob.
.google.cloud.aiplatform.v1beta1.ExplanationSpec explanation_spec = 23;
Type | Description |
ExplanationSpecOrBuilder |
getLabels()
public Map<String,String> getLabels()
Use getLabelsMap() instead.
Type | Description |
Map<String,String> |
getLabelsCount()
public int getLabelsCount()
The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
map<string, string> labels = 17;
Type | Description |
int |
getLabelsMap()
public Map<String,String> getLabelsMap()
The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
map<string, string> labels = 17;
Type | Description |
Map<String,String> |
getLabelsOrDefault(String key, String defaultValue)
public String getLabelsOrDefault(String key, String defaultValue)
The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
map<string, string> labels = 17;
Name | Description |
key | String |
defaultValue | String |
Type | Description |
String |
getLabelsOrThrow(String key)
public String getLabelsOrThrow(String key)
The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
map<string, string> labels = 17;
Name | Description |
key | String |
Type | Description |
String |
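A small usage sketch for the labels accessors above; the label keys are hypothetical.

```java
import com.google.cloud.aiplatform.v1beta1.Model;
import java.util.Map;

public class LabelsExample {
  static void inspectLabels(Model model) {
    // containsLabels / getLabelsOrDefault avoid the exception that
    // getLabelsOrThrow raises for a missing key.
    if (model.containsLabels("env")) {
      System.out.println("env = " + model.getLabelsOrThrow("env"));
    }
    System.out.println("team = " + model.getLabelsOrDefault("team", "unassigned"));

    // getLabelsMap() exposes the whole map for iteration.
    for (Map.Entry<String, String> label : model.getLabelsMap().entrySet()) {
      System.out.println(label.getKey() + " = " + label.getValue());
    }
  }
}
```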
getMetadata()
public Value getMetadata()
Immutable. Additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information.
.google.protobuf.Value metadata = 6 [(.google.api.field_behavior) = IMMUTABLE];
Type | Description |
Value | The metadata. |
getMetadataOrBuilder()
public ValueOrBuilder getMetadataOrBuilder()
Immutable. Additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information.
.google.protobuf.Value metadata = 6 [(.google.api.field_behavior) = IMMUTABLE];
Type | Description |
ValueOrBuilder |
getMetadataSchemaUri()
public String getMetadataSchemaUri()
Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 Schema Object. AutoML Models always have this field populated by Vertex AI; if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has read access.
string metadata_schema_uri = 5 [(.google.api.field_behavior) = IMMUTABLE];
Type | Description |
String | The metadataSchemaUri. |
getMetadataSchemaUriBytes()
public ByteString getMetadataSchemaUriBytes()
Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 Schema Object. AutoML Models always have this field populated by Vertex AI; if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has read access.
string metadata_schema_uri = 5 [(.google.api.field_behavior) = IMMUTABLE];
Type | Description |
ByteString | The bytes for metadataSchemaUri. |
getName()
public String getName()
The resource name of the Model.
string name = 1;
Type | Description |
String | The name. |
getNameBytes()
public ByteString getNameBytes()
The resource name of the Model.
string name = 1;
Type | Description |
ByteString | The bytes for name. |
getParserForType()
public Parser<Model> getParserForType()
Type | Description |
Parser<Model> |
getPredictSchemata()
public PredictSchemata getPredictSchemata()
The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain.
.google.cloud.aiplatform.v1beta1.PredictSchemata predict_schemata = 4;
Type | Description |
PredictSchemata | The predictSchemata. |
getPredictSchemataOrBuilder()
public PredictSchemataOrBuilder getPredictSchemataOrBuilder()
The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain.
.google.cloud.aiplatform.v1beta1.PredictSchemata predict_schemata = 4;
Type | Description |
PredictSchemataOrBuilder |
getSerializedSize()
public int getSerializedSize()
Type | Description |
int |
getSupportedDeploymentResourcesTypes(int index)
public Model.DeploymentResourcesType getSupportedDeploymentResourcesTypes(int index)
Output only. When this Model is deployed, its prediction resources are described by the prediction_resources field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats.
repeated .google.cloud.aiplatform.v1beta1.Model.DeploymentResourcesType supported_deployment_resources_types = 10 [(.google.api.field_behavior) = OUTPUT_ONLY];
Name | Description |
index | int The index of the element to return. |
Type | Description |
Model.DeploymentResourcesType | The supportedDeploymentResourcesTypes at the given index. |
getSupportedDeploymentResourcesTypesCount()
public int getSupportedDeploymentResourcesTypesCount()
Output only. When this Model is deployed, its prediction resources are described by the prediction_resources field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats.
repeated .google.cloud.aiplatform.v1beta1.Model.DeploymentResourcesType supported_deployment_resources_types = 10 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
int | The count of supportedDeploymentResourcesTypes. |
getSupportedDeploymentResourcesTypesList()
public List<Model.DeploymentResourcesType> getSupportedDeploymentResourcesTypesList()
Output only. When this Model is deployed, its prediction resources are described by the prediction_resources field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats.
repeated .google.cloud.aiplatform.v1beta1.Model.DeploymentResourcesType supported_deployment_resources_types = 10 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
List<DeploymentResourcesType> | A list containing the supportedDeploymentResourcesTypes. |
getSupportedDeploymentResourcesTypesValue(int index)
public int getSupportedDeploymentResourcesTypesValue(int index)
Output only. When this Model is deployed, its prediction resources are described by the prediction_resources field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats.
repeated .google.cloud.aiplatform.v1beta1.Model.DeploymentResourcesType supported_deployment_resources_types = 10 [(.google.api.field_behavior) = OUTPUT_ONLY];
Name | Description |
index | int The index of the value to return. |
Type | Description |
int | The enum numeric value on the wire of supportedDeploymentResourcesTypes at the given index. |
getSupportedDeploymentResourcesTypesValueList()
public List<Integer> getSupportedDeploymentResourcesTypesValueList()
Output only. When this Model is deployed, its prediction resources are described by the prediction_resources field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats.
repeated .google.cloud.aiplatform.v1beta1.Model.DeploymentResourcesType supported_deployment_resources_types = 10 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
List<Integer> | A list containing the enum numeric values on the wire for supportedDeploymentResourcesTypes. |
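A sketch applying the rule in the field description above: an empty supported_deployment_resources_types list means the Model cannot be deployed to an Endpoint for online prediction and would have to serve via a BatchPredictionJob instead.

```java
import com.google.cloud.aiplatform.v1beta1.Model;

public class DeploymentSupportExample {
  static void describeServingOptions(Model model) {
    if (model.getSupportedDeploymentResourcesTypesCount() == 0) {
      // No configuration types listed: online prediction is unavailable.
      System.out.println("Online prediction not supported; consider a BatchPredictionJob.");
      return;
    }
    for (Model.DeploymentResourcesType type :
        model.getSupportedDeploymentResourcesTypesList()) {
      System.out.println("Supported deployment resources type: " + type);
    }
  }
}
```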
getSupportedExportFormats(int index)
public Model.ExportFormat getSupportedExportFormats(int index)
Output only. The formats in which this Model may be exported. If empty, this Model is not available for export.
repeated .google.cloud.aiplatform.v1beta1.Model.ExportFormat supported_export_formats = 20 [(.google.api.field_behavior) = OUTPUT_ONLY];
Name | Description |
index | int |
Type | Description |
Model.ExportFormat |
getSupportedExportFormatsCount()
public int getSupportedExportFormatsCount()
Output only. The formats in which this Model may be exported. If empty, this Model is not available for export.
repeated .google.cloud.aiplatform.v1beta1.Model.ExportFormat supported_export_formats = 20 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
int |
getSupportedExportFormatsList()
public List<Model.ExportFormat> getSupportedExportFormatsList()
Output only. The formats in which this Model may be exported. If empty, this Model is not available for export.
repeated .google.cloud.aiplatform.v1beta1.Model.ExportFormat supported_export_formats = 20 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
List<ExportFormat> |
getSupportedExportFormatsOrBuilder(int index)
public Model.ExportFormatOrBuilder getSupportedExportFormatsOrBuilder(int index)
Output only. The formats in which this Model may be exported. If empty, this Model is not available for export.
repeated .google.cloud.aiplatform.v1beta1.Model.ExportFormat supported_export_formats = 20 [(.google.api.field_behavior) = OUTPUT_ONLY];
Name | Description |
index | int |
Type | Description |
Model.ExportFormatOrBuilder |
getSupportedExportFormatsOrBuilderList()
public List<? extends Model.ExportFormatOrBuilder> getSupportedExportFormatsOrBuilderList()
Output only. The formats in which this Model may be exported. If empty, this Model is not available for export.
repeated .google.cloud.aiplatform.v1beta1.Model.ExportFormat supported_export_formats = 20 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
List<? extends com.google.cloud.aiplatform.v1beta1.Model.ExportFormatOrBuilder> |
getSupportedInputStorageFormats(int index)
public String getSupportedInputStorageFormats(int index)
Output only. The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are:
- jsonl: The JSON Lines format, where each instance is a single line. Uses GcsSource.
- csv: The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource.
- tf-record: The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource.
- tf-record-gzip: Similar to tf-record, but the file is gzipped. Uses GcsSource.
- bigquery: Each instance is a single row in BigQuery. Uses BigQuerySource.
- file-list: Each line of the file is the location of an instance to process; uses the gcs_source field of the InputConfig object.
If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain.
repeated string supported_input_storage_formats = 11 [(.google.api.field_behavior) = OUTPUT_ONLY];
Name | Description |
index | int The index of the element to return. |
Type | Description |
String | The supportedInputStorageFormats at the given index. |
getSupportedInputStorageFormatsBytes(int index)
public ByteString getSupportedInputStorageFormatsBytes(int index)
Output only. The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are:
- jsonl: The JSON Lines format, where each instance is a single line. Uses GcsSource.
- csv: The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource.
- tf-record: The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource.
- tf-record-gzip: Similar to tf-record, but the file is gzipped. Uses GcsSource.
- bigquery: Each instance is a single row in BigQuery. Uses BigQuerySource.
- file-list: Each line of the file is the location of an instance to process; uses the gcs_source field of the InputConfig object.
If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain.
repeated string supported_input_storage_formats = 11 [(.google.api.field_behavior) = OUTPUT_ONLY];
Name | Description |
index | int The index of the value to return. |
Type | Description |
ByteString | The bytes of the supportedInputStorageFormats at the given index. |
getSupportedInputStorageFormatsCount()
public int getSupportedInputStorageFormatsCount()
Output only. The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are:
- jsonl: The JSON Lines format, where each instance is a single line. Uses GcsSource.
- csv: The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource.
- tf-record: The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource.
- tf-record-gzip: Similar to tf-record, but the file is gzipped. Uses GcsSource.
- bigquery: Each instance is a single row in BigQuery. Uses BigQuerySource.
- file-list: Each line of the file is the location of an instance to process; uses the gcs_source field of the InputConfig object.
If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain.
repeated string supported_input_storage_formats = 11 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
int | The count of supportedInputStorageFormats. |
getSupportedInputStorageFormatsList()
public ProtocolStringList getSupportedInputStorageFormatsList()
Output only. The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are:
- jsonl: The JSON Lines format, where each instance is a single line. Uses GcsSource.
- csv: The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource.
- tf-record: The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource.
- tf-record-gzip: Similar to tf-record, but the file is gzipped. Uses GcsSource.
- bigquery: Each instance is a single row in BigQuery. Uses BigQuerySource.
- file-list: Each line of the file is the location of an instance to process; uses the gcs_source field of the InputConfig object.
If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain.
repeated string supported_input_storage_formats = 11 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
ProtocolStringList | A list containing the supportedInputStorageFormats. |
getSupportedOutputStorageFormats(int index)
public String getSupportedOutputStorageFormats(int index)
Output only. The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are:
- jsonl: The JSON Lines format, where each prediction is a single line. Uses GcsDestination.
- csv: The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination.
- bigquery: Each prediction is a single row in a BigQuery table. Uses BigQueryDestination.
If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain.
repeated string supported_output_storage_formats = 12 [(.google.api.field_behavior) = OUTPUT_ONLY];
Name | Description |
index | int The index of the element to return. |
Type | Description |
String | The supportedOutputStorageFormats at the given index. |
getSupportedOutputStorageFormatsBytes(int index)
public ByteString getSupportedOutputStorageFormatsBytes(int index)
Output only. The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are:
- jsonl: The JSON Lines format, where each prediction is a single line. Uses GcsDestination.
- csv: The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination.
- bigquery: Each prediction is a single row in a BigQuery table. Uses BigQueryDestination.
If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain.
repeated string supported_output_storage_formats = 12 [(.google.api.field_behavior) = OUTPUT_ONLY];
Name | Description |
index | int The index of the value to return. |
Type | Description |
ByteString | The bytes of the supportedOutputStorageFormats at the given index. |
getSupportedOutputStorageFormatsCount()
public int getSupportedOutputStorageFormatsCount()
Output only. The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are:
- jsonl: The JSON Lines format, where each prediction is a single line. Uses GcsDestination.
- csv: The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination.
- bigquery: Each prediction is a single row in a BigQuery table. Uses BigQueryDestination.
If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain.
repeated string supported_output_storage_formats = 12 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
int | The count of supportedOutputStorageFormats. |
getSupportedOutputStorageFormatsList()
public ProtocolStringList getSupportedOutputStorageFormatsList()
Output only. The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are:
- jsonl: The JSON Lines format, where each prediction is a single line. Uses GcsDestination.
- csv: The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination.
- bigquery: Each prediction is a single row in a BigQuery table. Uses BigQueryDestination.
If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain.
repeated string supported_output_storage_formats = 12 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
ProtocolStringList | A list containing the supportedOutputStorageFormats. |
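A sketch that consults the two repeated string fields above before configuring a BatchPredictionJob; the "jsonl" literal is one of the format names listed in the field descriptions.

```java
import com.google.cloud.aiplatform.v1beta1.Model;

public class BatchFormatCheckExample {
  // True if the Model accepts JSON Lines input and can write JSON Lines output
  // in a BatchPredictionJob.
  static boolean supportsJsonlBatch(Model model) {
    return model.getSupportedInputStorageFormatsList().contains("jsonl")
        && model.getSupportedOutputStorageFormatsList().contains("jsonl");
  }
}
```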
getTrainingPipeline()
public String getTrainingPipeline()
Output only. The resource name of the TrainingPipeline that uploaded this Model, if any.
string training_pipeline = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... }];
Type | Description |
String | The trainingPipeline. |
getTrainingPipelineBytes()
public ByteString getTrainingPipelineBytes()
Output only. The resource name of the TrainingPipeline that uploaded this Model, if any.
string training_pipeline = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... }];
Type | Description |
ByteString | The bytes for trainingPipeline. |
getUnknownFields()
public final UnknownFieldSet getUnknownFields()
Type | Description |
UnknownFieldSet |
getUpdateTime()
public Timestamp getUpdateTime()
Output only. Timestamp when this Model was most recently updated.
.google.protobuf.Timestamp update_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
Timestamp | The updateTime. |
getUpdateTimeOrBuilder()
public TimestampOrBuilder getUpdateTimeOrBuilder()
Output only. Timestamp when this Model was most recently updated.
.google.protobuf.Timestamp update_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
TimestampOrBuilder |
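A sketch that converts the returned protobuf Timestamps to java.time.Instant using only the JDK, guarded by the has* methods documented further below.

```java
import com.google.cloud.aiplatform.v1beta1.Model;
import com.google.protobuf.Timestamp;
import java.time.Instant;

public class ModelTimestampsExample {
  static void printTimes(Model model) {
    if (model.hasCreateTime()) {
      Timestamp t = model.getCreateTime();
      System.out.println("Created: " + Instant.ofEpochSecond(t.getSeconds(), t.getNanos()));
    }
    if (model.hasUpdateTime()) {
      Timestamp t = model.getUpdateTime();
      System.out.println("Updated: " + Instant.ofEpochSecond(t.getSeconds(), t.getNanos()));
    }
  }
}
```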
hasContainerSpec()
public boolean hasContainerSpec()
Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models.
.google.cloud.aiplatform.v1beta1.ModelContainerSpec container_spec = 9 [(.google.api.field_behavior) = INPUT_ONLY];
Type | Description |
boolean | Whether the containerSpec field is set. |
hasCreateTime()
public boolean hasCreateTime()
Output only. Timestamp when this Model was uploaded into Vertex AI.
.google.protobuf.Timestamp create_time = 13 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
boolean | Whether the createTime field is set. |
hasEncryptionSpec()
public boolean hasEncryptionSpec()
Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key.
.google.cloud.aiplatform.v1beta1.EncryptionSpec encryption_spec = 24;
Type | Description |
boolean | Whether the encryptionSpec field is set. |
hasExplanationSpec()
public boolean hasExplanationSpec()
The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob.
.google.cloud.aiplatform.v1beta1.ExplanationSpec explanation_spec = 23;
Type | Description |
boolean | Whether the explanationSpec field is set. |
hasMetadata()
public boolean hasMetadata()
Immutable. Additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information.
.google.protobuf.Value metadata = 6 [(.google.api.field_behavior) = IMMUTABLE];
Type | Description |
boolean | Whether the metadata field is set. |
hasPredictSchemata()
public boolean hasPredictSchemata()
The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain.
.google.cloud.aiplatform.v1beta1.PredictSchemata predict_schemata = 4;
Type | Description |
boolean | Whether the predictSchemata field is set. |
hasUpdateTime()
public boolean hasUpdateTime()
Output only. Timestamp when this Model was most recently updated.
.google.protobuf.Timestamp update_time = 14 [(.google.api.field_behavior) = OUTPUT_ONLY];
Type | Description |
boolean | Whether the updateTime field is set. |
hashCode()
public int hashCode()
Type | Description |
int |
internalGetFieldAccessorTable()
protected GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
Type | Description |
FieldAccessorTable |
internalGetMapField(int number)
protected MapField internalGetMapField(int number)
Name | Description |
number | int |
Type | Description |
MapField |
isInitialized()
public final boolean isInitialized()
Type | Description |
boolean |
newBuilderForType()
public Model.Builder newBuilderForType()
Type | Description |
Model.Builder |
newBuilderForType(GeneratedMessageV3.BuilderParent parent)
protected Model.Builder newBuilderForType(GeneratedMessageV3.BuilderParent parent)
Name | Description |
parent | BuilderParent |
Type | Description |
Model.Builder |
newInstance(GeneratedMessageV3.UnusedPrivateParameter unused)
protected Object newInstance(GeneratedMessageV3.UnusedPrivateParameter unused)
Name | Description |
unused | UnusedPrivateParameter |
Type | Description |
Object |
toBuilder()
public Model.Builder toBuilder()
Type | Description |
Model.Builder |
writeTo(CodedOutputStream output)
public void writeTo(CodedOutputStream output)
Name | Description |
output | CodedOutputStream |
Type | Description |
IOException |
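A minimal round-trip sketch for writeTo(CodedOutputStream) paired with parseFrom(CodedInputStream), using in-memory streams.

```java
import com.google.cloud.aiplatform.v1beta1.Model;
import com.google.protobuf.CodedInputStream;
import com.google.protobuf.CodedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class WriteToExample {
  public static void main(String[] args) throws IOException {
    Model model = Model.newBuilder().setDisplayName("my-model").build();

    // Serialize: writeTo writes the wire format; flush() pushes buffered bytes
    // into the underlying stream.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    CodedOutputStream out = CodedOutputStream.newInstance(bytes);
    model.writeTo(out);
    out.flush();

    // Parse the same bytes back.
    CodedInputStream in =
        CodedInputStream.newInstance(new ByteArrayInputStream(bytes.toByteArray()));
    Model parsed = Model.parseFrom(in);
    System.out.println(parsed.getDisplayName()); // "my-model"
  }
}
```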