Reference documentation and code samples for the AI Platform Notebooks V1 API class Google::Cloud::Notebooks::V1::ExecutionTemplate.
The description of a notebook execution workload.
Inherits
- Object
Extended By
- Google::Protobuf::MessageExts::ClassMethods
Includes
- Google::Protobuf::MessageExts
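A minimal usage sketch, assuming the google-cloud-notebooks-v1 gem is installed; the bucket names, paths, and machine type below are illustrative placeholders, not values required by the API.

require "google/cloud/notebooks/v1"

# Illustrative placeholders only: bucket paths and machine type are not prescribed by the API.
template = Google::Cloud::Notebooks::V1::ExecutionTemplate.new(
  scale_tier:             :CUSTOM,
  master_type:            "n1-standard-4",
  input_notebook_file:    "gs://my-bucket/notebooks/analysis.ipynb",
  output_notebook_folder: "gs://my-bucket/notebook-results",
  container_image_uri:    "gcr.io/deeplearning-platform-release/base-cu100"
)

The later sketches in this page continue from this template instance.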
Methods
#accelerator_config
def accelerator_config() -> ::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorConfig
- (::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorConfig) — Configuration (count and accelerator type) for hardware running notebook execution.
#accelerator_config=
def accelerator_config=(value) -> ::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorConfig
- value (::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorConfig) — Configuration (count and accelerator type) for hardware running notebook execution.
- (::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorConfig) — Configuration (count and accelerator type) for hardware running notebook execution.
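A hedged sketch, assuming the nested SchedulerAcceleratorConfig message exposes type and core_count fields and that NVIDIA_TESLA_T4 is among the accelerator type enum values; given the template instance from the sketch above:

# Assumed field names (type, core_count) and enum value; adjust to the generated message.
template.accelerator_config =
  Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorConfig.new(
    type:       :NVIDIA_TESLA_T4,
    core_count: 1
  )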
#container_image_uri
def container_image_uri() -> ::String
- (::String) — Container Image URI to a DLVM. Example: 'gcr.io/deeplearning-platform-release/base-cu100'. More examples can be found at: https://cloud.google.com/ai-platform/deep-learning-containers/docs/choosing-container
#container_image_uri=
def container_image_uri=(value) -> ::String
- value (::String) — Container Image URI to a DLVM. Example: 'gcr.io/deeplearning-platform-release/base-cu100'. More examples can be found at: https://cloud.google.com/ai-platform/deep-learning-containers/docs/choosing-container
- (::String) — Container Image URI to a DLVM. Example: 'gcr.io/deeplearning-platform-release/base-cu100'. More examples can be found at: https://cloud.google.com/ai-platform/deep-learning-containers/docs/choosing-container
#dataproc_parameters
def dataproc_parameters() -> ::Google::Cloud::Notebooks::V1::ExecutionTemplate::DataprocParameters
- (::Google::Cloud::Notebooks::V1::ExecutionTemplate::DataprocParameters) — Parameters used in Dataproc JobType executions.
#dataproc_parameters=
def dataproc_parameters=(value) -> ::Google::Cloud::Notebooks::V1::ExecutionTemplate::DataprocParameters
- value (::Google::Cloud::Notebooks::V1::ExecutionTemplate::DataprocParameters) — Parameters used in Dataproc JobType executions.
- (::Google::Cloud::Notebooks::V1::ExecutionTemplate::DataprocParameters) — Parameters used in Dataproc JobType executions.
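A hedged sketch of pointing an execution at an existing Dataproc cluster, assuming DataprocParameters carries a cluster resource name field and that :DATAPROC is a defined JobType value; project, region, and cluster names are placeholders:

# Assumed field name (cluster), assumed enum value, and placeholder resource name.
template.job_type = :DATAPROC
template.dataproc_parameters =
  Google::Cloud::Notebooks::V1::ExecutionTemplate::DataprocParameters.new(
    cluster: "projects/my-project/regions/us-central1/clusters/my-cluster"
  )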
#input_notebook_file
def input_notebook_file() -> ::String
- (::String) — Path to the notebook file to execute.
Must be in a Google Cloud Storage bucket.
Format: gs://{bucket_name}/{folder}/{notebook_file_name}
Ex: gs://notebook_user/scheduled_notebooks/sentiment_notebook.ipynb
#input_notebook_file=
def input_notebook_file=(value) -> ::String
- value (::String) — Path to the notebook file to execute.
Must be in a Google Cloud Storage bucket.
Format: gs://{bucket_name}/{folder}/{notebook_file_name}
Ex: gs://notebook_user/scheduled_notebooks/sentiment_notebook.ipynb
- (::String) — Path to the notebook file to execute.
Must be in a Google Cloud Storage bucket.
Format: gs://{bucket_name}/{folder}/{notebook_file_name}
Ex: gs://notebook_user/scheduled_notebooks/sentiment_notebook.ipynb
#job_type
def job_type() -> ::Google::Cloud::Notebooks::V1::ExecutionTemplate::JobType
- (::Google::Cloud::Notebooks::V1::ExecutionTemplate::JobType) — The type of Job to be used on this execution.
#job_type=
def job_type=(value) -> ::Google::Cloud::Notebooks::V1::ExecutionTemplate::JobType
- value (::Google::Cloud::Notebooks::V1::ExecutionTemplate::JobType) — The type of Job to be used on this execution.
- (::Google::Cloud::Notebooks::V1::ExecutionTemplate::JobType) — The type of Job to be used on this execution.
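In Ruby, protobuf enum fields are assigned and read as symbols; assuming :VERTEX_AI is one of the defined JobType values, a sketch:

# Enum fields accept and return symbols.
template.job_type = :VERTEX_AI
template.job_type # => :VERTEX_AI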
#kernel_spec
def kernel_spec() -> ::String
- (::String) — Name of the kernel spec to use. This must be specified if the kernel spec name on the execution target does not match the name in the input notebook file.
#kernel_spec=
def kernel_spec=(value) -> ::String
- value (::String) — Name of the kernel spec to use. This must be specified if the kernel spec name on the execution target does not match the name in the input notebook file.
- (::String) — Name of the kernel spec to use. This must be specified if the kernel spec name on the execution target does not match the name in the input notebook file.
#labels
def labels() -> ::Google::Protobuf::Map{::String => ::String}
- (::Google::Protobuf::Map{::String => ::String}) — Labels for the execution. If the execution is scheduled, the labels will include 'nbs-scheduled'; otherwise, for an immediate execution, they will include 'nbs-immediate'. Use these labels to efficiently index executions by type.
#labels=
def labels=(value) -> ::Google::Protobuf::Map{::String => ::String}
- value (::Google::Protobuf::Map{::String => ::String}) — Labels for the execution. If the execution is scheduled, the labels will include 'nbs-scheduled'; otherwise, for an immediate execution, they will include 'nbs-immediate'. Use these labels to efficiently index executions by type.
- (::Google::Protobuf::Map{::String => ::String}) — Labels for the execution. If the execution is scheduled, the labels will include 'nbs-scheduled'; otherwise, for an immediate execution, they will include 'nbs-immediate'. Use these labels to efficiently index executions by type.
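Google::Protobuf::Map behaves much like a Hash, so labels can be added in place; the values and the extra key below are illustrative placeholders:

# Map fields support Hash-style indexing; keys and values shown are placeholders.
template.labels["nbs-scheduled"] = "true"
template.labels["team"]          = "data-science"
template.labels["team"] # => "data-science"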
#master_type
def master_type() -> ::String
- (::String) — Specifies the type of virtual machine to use for your training
job's master worker. You must specify this field when scaleTier is set to CUSTOM.
You can use certain Compute Engine machine types directly in this field. The following types are supported:
n1-standard-4
n1-standard-8
n1-standard-16
n1-standard-32
n1-standard-64
n1-standard-96
n1-highmem-2
n1-highmem-4
n1-highmem-8
n1-highmem-16
n1-highmem-32
n1-highmem-64
n1-highmem-96
n1-highcpu-16
n1-highcpu-32
n1-highcpu-64
n1-highcpu-96
Alternatively, you can use the following legacy machine types:
standard
large_model
complex_model_s
complex_model_m
complex_model_l
standard_gpu
complex_model_m_gpu
complex_model_l_gpu
standard_p100
complex_model_m_p100
standard_v100
large_model_v100
complex_model_m_v100
complex_model_l_v100
Finally, if you want to use a TPU for training, specify cloud_tpu in this field.
Learn more about the special configuration options for training with TPU.
#master_type=
def master_type=(value) -> ::String
- value (::String) — Specifies the type of virtual machine to use for your training
job's master worker. You must specify this field when scaleTier is set to CUSTOM.
You can use certain Compute Engine machine types directly in this field. The following types are supported:
n1-standard-4
n1-standard-8
n1-standard-16
n1-standard-32
n1-standard-64
n1-standard-96
n1-highmem-2
n1-highmem-4
n1-highmem-8
n1-highmem-16
n1-highmem-32
n1-highmem-64
n1-highmem-96
n1-highcpu-16
n1-highcpu-32
n1-highcpu-64
n1-highcpu-96
Alternatively, you can use the following legacy machine types:
standard
large_model
complex_model_s
complex_model_m
complex_model_l
standard_gpu
complex_model_m_gpu
complex_model_l_gpu
standard_p100
complex_model_m_p100
standard_v100
large_model_v100
complex_model_m_v100
complex_model_l_v100
Finally, if you want to use a TPU for training, specify cloud_tpu in this field.
Learn more about the special configuration options for training with TPU.
- (::String) — Specifies the type of virtual machine to use for your training
job's master worker. You must specify this field when scaleTier is set to CUSTOM.
You can use certain Compute Engine machine types directly in this field. The following types are supported:
n1-standard-4
n1-standard-8
n1-standard-16
n1-standard-32
n1-standard-64
n1-standard-96
n1-highmem-2
n1-highmem-4
n1-highmem-8
n1-highmem-16
n1-highmem-32
n1-highmem-64
n1-highmem-96
n1-highcpu-16
n1-highcpu-32
n1-highcpu-64
n1-highcpu-96
Alternatively, you can use the following legacy machine types:
standard
large_model
complex_model_s
complex_model_m
complex_model_l
standard_gpu
complex_model_m_gpu
complex_model_l_gpu
standard_p100
complex_model_m_p100
standard_v100
large_model_v100
complex_model_m_v100
complex_model_l_v100
Finally, if you want to use a TPU for training, specify cloud_tpu in this field.
Learn more about the special configuration options for training with TPU.
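Because master_type is only honored when the scale tier is CUSTOM, the two fields are typically set together; the machine type below is just one of the supported values listed above:

# master_type takes effect only with the CUSTOM scale tier.
template.scale_tier  = :CUSTOM
template.master_type = "n1-highmem-8"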
#output_notebook_folder
def output_notebook_folder() -> ::String
- (::String) — Path to the notebook folder to write to.
Must be in a Google Cloud Storage bucket path.
Format: gs://{bucket_name}/{folder}
Ex: gs://notebook_user/scheduled_notebooks
#output_notebook_folder=
def output_notebook_folder=(value) -> ::String
- value (::String) — Path to the notebook folder to write to.
Must be in a Google Cloud Storage bucket path.
Format: gs://{bucket_name}/{folder}
Ex: gs://notebook_user/scheduled_notebooks
- (::String) — Path to the notebook folder to write to.
Must be in a Google Cloud Storage bucket path.
Format: gs://{bucket_name}/{folder}
Ex: gs://notebook_user/scheduled_notebooks
#parameters
def parameters() -> ::String
- (::String) — Parameters used within the 'input_notebook_file' notebook.
#parameters=
def parameters=(value) -> ::String
- value (::String) — Parameters used within the 'input_notebook_file' notebook.
- (::String) — Parameters used within the 'input_notebook_file' notebook.
#params_yaml_file
def params_yaml_file() -> ::String
- (::String) — Parameters to be overridden in the notebook during execution.
See https://papermill.readthedocs.io/en/latest/usage-parameterize.html for
how to specify parameters in the input notebook and pass them here in a YAML file.
Ex: gs://notebook_user/scheduled_notebooks/sentiment_notebook_params.yaml
#params_yaml_file=
def params_yaml_file=(value) -> ::String
- value (::String) — Parameters to be overridden in the notebook during execution.
See https://papermill.readthedocs.io/en/latest/usage-parameterize.html for
how to specify parameters in the input notebook and pass them here in a YAML file.
Ex: gs://notebook_user/scheduled_notebooks/sentiment_notebook_params.yaml
- (::String) — Parameters to be overridden in the notebook during execution.
See https://papermill.readthedocs.io/en/latest/usage-parameterize.html for
how to specify parameters in the input notebook and pass them here in a YAML file.
Ex: gs://notebook_user/scheduled_notebooks/sentiment_notebook_params.yaml
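A hedged sketch of wiring up a papermill parameters file; the Cloud Storage path and the YAML keys shown in the comment are illustrative placeholders:

# The referenced YAML holds plain key/value overrides picked up by papermill, e.g.:
#   alpha: 0.5
#   data_path: gs://my-bucket/data/reviews.csv
template.params_yaml_file = "gs://my-bucket/params/sentiment_params.yaml"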
#scale_tier
def scale_tier() -> ::Google::Cloud::Notebooks::V1::ExecutionTemplate::ScaleTier
- (::Google::Cloud::Notebooks::V1::ExecutionTemplate::ScaleTier) — Required. Scale tier of the hardware used for notebook execution. DEPRECATED: will be discontinued; currently only CUSTOM is supported.
#scale_tier=
def scale_tier=(value) -> ::Google::Cloud::Notebooks::V1::ExecutionTemplate::ScaleTier
- value (::Google::Cloud::Notebooks::V1::ExecutionTemplate::ScaleTier) — Required. Scale tier of the hardware used for notebook execution. DEPRECATED: will be discontinued; currently only CUSTOM is supported.
- (::Google::Cloud::Notebooks::V1::ExecutionTemplate::ScaleTier) — Required. Scale tier of the hardware used for notebook execution. DEPRECATED: will be discontinued; currently only CUSTOM is supported.
#service_account
def service_account() -> ::String
- (::String) — The email address of a service account to use when running the execution.
You must have the iam.serviceAccounts.actAs permission for the specified service account.
#service_account=
def service_account=(value) -> ::String
- value (::String) — The email address of a service account to use when running the execution.
You must have the iam.serviceAccounts.actAs permission for the specified service account.
- (::String) — The email address of a service account to use when running the execution.
You must have the iam.serviceAccounts.actAs permission for the specified service account.
#tensorboard
def tensorboard() -> ::String
- (::String) — The name of a Vertex AI Tensorboard resource to which this execution
will upload Tensorboard logs.
Format: projects/{project}/locations/{location}/tensorboards/{tensorboard}
#tensorboard=
def tensorboard=(value) -> ::String
- value (::String) — The name of a Vertex AI Tensorboard resource to which this execution
will upload Tensorboard logs.
Format: projects/{project}/locations/{location}/tensorboards/{tensorboard}
- (::String) — The name of a Vertex AI Tensorboard resource to which this execution
will upload Tensorboard logs.
Format: projects/{project}/locations/{location}/tensorboards/{tensorboard}
#vertex_ai_parameters
def vertex_ai_parameters() -> ::Google::Cloud::Notebooks::V1::ExecutionTemplate::VertexAIParameters
- (::Google::Cloud::Notebooks::V1::ExecutionTemplate::VertexAIParameters) — Parameters used in Vertex AI JobType executions.
#vertex_ai_parameters=
def vertex_ai_parameters=(value) -> ::Google::Cloud::Notebooks::V1::ExecutionTemplate::VertexAIParameters
- value (::Google::Cloud::Notebooks::V1::ExecutionTemplate::VertexAIParameters) — Parameters used in Vertex AI JobType executions.
- (::Google::Cloud::Notebooks::V1::ExecutionTemplate::VertexAIParameters) — Parameters used in Vertex AI JobType executions.
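A hedged sketch for a Vertex AI execution, assuming VertexAIParameters exposes network and env fields; the VPC network resource name and environment variable are placeholders:

# Assumed field names (network, env) and placeholder values.
template.vertex_ai_parameters =
  Google::Cloud::Notebooks::V1::ExecutionTemplate::VertexAIParameters.new(
    network: "projects/12345/global/networks/my-vpc",
    env:     { "ENV_NAME" => "staging" }
  )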