Reference documentation and code samples for the Vertex AI V1 API class Google::Cloud::AIPlatform::V1::Model.
A trained machine learning Model.
Inherits
- Object
Extended By
- Google::Protobuf::MessageExts::ClassMethods
Includes
- Google::Protobuf::MessageExts
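Model includes Google::Protobuf::MessageExts, so instances are plain protobuf messages that can be constructed directly with keyword arguments for the writable fields. A minimal sketch, assuming the google-cloud-ai_platform-v1 gem is installed and using placeholder names and bucket paths:

require "google/cloud/ai_platform/v1"

# Build a Model message with the required display_name and an artifact
# location. The Cloud Storage path is a placeholder.
model = Google::Cloud::AIPlatform::V1::Model.new(
  display_name: "my-model",                       # required, up to 128 UTF-8 characters
  description:  "Example model",
  artifact_uri: "gs://my-bucket/model-artifacts/" # immutable once set
)

puts model.display_name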
Methods
#artifact_uri
def artifact_uri() -> ::String
- (::String) — Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models.
#artifact_uri=
def artifact_uri=(value) -> ::String
- value (::String) — Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models.
- (::String) — Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models.
#container_spec
def container_spec() -> ::Google::Cloud::AIPlatform::V1::ModelContainerSpec
- (::Google::Cloud::AIPlatform::V1::ModelContainerSpec) — Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models.
#container_spec=
def container_spec=(value) -> ::Google::Cloud::AIPlatform::V1::ModelContainerSpec
- value (::Google::Cloud::AIPlatform::V1::ModelContainerSpec) — Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models.
- (::Google::Cloud::AIPlatform::V1::ModelContainerSpec) — Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models.
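A short sketch of attaching a custom serving container; the image URI, routes, and bucket below are placeholders:

# container_spec is input only: it is ingested by ModelService.UploadModel
# along with the rest of the Model.
container = Google::Cloud::AIPlatform::V1::ModelContainerSpec.new(
  image_uri:     "us-docker.pkg.dev/my-project/my-repo/serving:latest",
  predict_route: "/predict",
  health_route:  "/health"
)

model = Google::Cloud::AIPlatform::V1::Model.new(
  display_name:   "my-custom-model",
  artifact_uri:   "gs://my-bucket/model-artifacts/",
  container_spec: container
)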
#create_time
def create_time() -> ::Google::Protobuf::Timestamp
- (::Google::Protobuf::Timestamp) — Output only. Timestamp when this Model was uploaded into Vertex AI.
#deployed_models
def deployed_models() -> ::Array<::Google::Cloud::AIPlatform::V1::DeployedModelRef>
- (::Array<::Google::Cloud::AIPlatform::V1::DeployedModelRef>) — Output only. The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations.
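Because this field is output only, it is only populated on a Model read back from the service. A brief sketch, assuming model was returned by ModelService (for example from get_model):

model.deployed_models.each do |ref|
  # Each DeployedModelRef carries the Endpoint resource name and the
  # ID of the DeployedModel within that Endpoint.
  puts "#{ref.endpoint} => #{ref.deployed_model_id}"
end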
#description
def description() -> ::String
- (::String) — The description of the Model.
#description=
def description=(value) -> ::String
- value (::String) — The description of the Model.
- (::String) — The description of the Model.
#display_name
def display_name() -> ::String
- (::String) — Required. The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters.
#display_name=
def display_name=(value) -> ::String
- value (::String) — Required. The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters.
- (::String) — Required. The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters.
#encryption_spec
def encryption_spec() -> ::Google::Cloud::AIPlatform::V1::EncryptionSpec
- (::Google::Cloud::AIPlatform::V1::EncryptionSpec) — Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key.
#encryption_spec=
def encryption_spec=(value) -> ::Google::Cloud::AIPlatform::V1::EncryptionSpec
- value (::Google::Cloud::AIPlatform::V1::EncryptionSpec) — Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key.
- (::Google::Cloud::AIPlatform::V1::EncryptionSpec) — Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key.
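A minimal sketch of attaching a customer-managed encryption key; the KMS key name below is a placeholder:

encryption = Google::Cloud::AIPlatform::V1::EncryptionSpec.new(
  kms_key_name: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"
)

model = Google::Cloud::AIPlatform::V1::Model.new(
  display_name:    "my-cmek-model",
  encryption_spec: encryption
)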
#etag
def etag() -> ::String
- (::String) — Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.
#etag=
def etag=(value) -> ::String
- value (::String) — Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.
- (::String) — Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.
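A sketch of the read-modify-write flow the etag enables, assuming a ModelService client and a placeholder resource name. The etag returned by get_model stays on the message, so the update is rejected if the Model changed in the meantime:

require "google/cloud/ai_platform/v1"

client = Google::Cloud::AIPlatform::V1::ModelService::Client.new

name  = "projects/my-project/locations/us-central1/models/1234567890"
model = client.get_model name: name

# Change a mutable field, keeping the etag from the read above.
model.description = "Updated description"

client.update_model(
  model:       model,
  update_mask: { paths: ["description"] } # only the listed fields are written
)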
#explanation_spec
def explanation_spec() -> ::Google::Cloud::AIPlatform::V1::ExplanationSpec
- (::Google::Cloud::AIPlatform::V1::ExplanationSpec) — The default explanation specification for this Model.
The Model can be used for [requesting explanation][google.cloud.aiplatform.v1.PredictionService.Explain] after being deployed if it is populated. The Model can be used for [batch explanation][google.cloud.aiplatform.v1.BatchPredictionJob.generate_explanation] if it is populated.
All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob.
If the default explanation specification is not set for this Model, this Model can still be used for [requesting explanation][google.cloud.aiplatform.v1.PredictionService.Explain] by setting explanation_spec of DeployModelRequest.deployed_model and for [batch explanation][google.cloud.aiplatform.v1.BatchPredictionJob.generate_explanation] by setting explanation_spec of BatchPredictionJob.
#explanation_spec=
def explanation_spec=(value) -> ::Google::Cloud::AIPlatform::V1::ExplanationSpec
- value (::Google::Cloud::AIPlatform::V1::ExplanationSpec) — The default explanation specification for this Model.
The Model can be used for [requesting explanation][google.cloud.aiplatform.v1.PredictionService.Explain] after being deployed if it is populated. The Model can be used for [batch explanation][google.cloud.aiplatform.v1.BatchPredictionJob.generate_explanation] if it is populated.
All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob.
If the default explanation specification is not set for this Model, this Model can still be used for [requesting explanation][google.cloud.aiplatform.v1.PredictionService.Explain] by setting explanation_spec of DeployModelRequest.deployed_model and for [batch explanation][google.cloud.aiplatform.v1.BatchPredictionJob.generate_explanation] by setting explanation_spec of BatchPredictionJob.
- (::Google::Cloud::AIPlatform::V1::ExplanationSpec) — The default explanation specification for this Model.
The Model can be used for [requesting explanation][google.cloud.aiplatform.v1.PredictionService.Explain] after being deployed if it is populated. The Model can be used for [batch explanation][google.cloud.aiplatform.v1.BatchPredictionJob.generate_explanation] if it is populated.
All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob.
If the default explanation specification is not set for this Model, this Model can still be used for [requesting explanation][google.cloud.aiplatform.v1.PredictionService.Explain] by setting explanation_spec of DeployModelRequest.deployed_model and for [batch explanation][google.cloud.aiplatform.v1.BatchPredictionJob.generate_explanation] by setting explanation_spec of BatchPredictionJob.
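A hedged sketch of a default explanation specification using Sampled Shapley attribution; the path_count value is illustrative only, and depending on the model an ExplanationMetadata describing its inputs and outputs may also need to be set:

explanation = Google::Cloud::AIPlatform::V1::ExplanationSpec.new(
  parameters: Google::Cloud::AIPlatform::V1::ExplanationParameters.new(
    sampled_shapley_attribution:
      Google::Cloud::AIPlatform::V1::SampledShapleyAttribution.new(path_count: 10)
  )
)

model = Google::Cloud::AIPlatform::V1::Model.new(
  display_name:     "my-explainable-model",
  explanation_spec: explanation
)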