Reference documentation and code samples for the Cloud Dataproc V1 API class Google::Cloud::Dataproc::V1::ClusterConfig.
The cluster config.
Inherits
- Object
Extended By
- Google::Protobuf::MessageExts::ClassMethods
Includes
- Google::Protobuf::MessageExts
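Example (a minimal, hedged sketch of constructing a ClusterConfig; the bucket name, machine type, and instance counts below are hypothetical placeholders, not defaults):

    require "google/cloud/dataproc/v1"

    config = Google::Cloud::Dataproc::V1::ClusterConfig.new(
      config_bucket: "my-staging-bucket",   # hypothetical bucket name
      master_config: Google::Cloud::Dataproc::V1::InstanceGroupConfig.new(
        num_instances: 1,
        machine_type_uri: "n1-standard-4"
      ),
      worker_config: Google::Cloud::Dataproc::V1::InstanceGroupConfig.new(
        num_instances: 2,
        machine_type_uri: "n1-standard-4"
      )
    )

The per-field examples below build on this `config` object.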
Methods
#autoscaling_config
def autoscaling_config() -> ::Google::Cloud::Dataproc::V1::AutoscalingConfig
- (::Google::Cloud::Dataproc::V1::AutoscalingConfig) — Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
#autoscaling_config=
def autoscaling_config=(value) -> ::Google::Cloud::Dataproc::V1::AutoscalingConfig
- value (::Google::Cloud::Dataproc::V1::AutoscalingConfig) — Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
- (::Google::Cloud::Dataproc::V1::AutoscalingConfig) — Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
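Example (sketch; the project, region, and policy names in the URI are hypothetical placeholders):

    config.autoscaling_config = Google::Cloud::Dataproc::V1::AutoscalingConfig.new(
      policy_uri: "projects/my-project/regions/us-central1/autoscalingPolicies/my-policy"
    )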
#config_bucket
def config_bucket() -> ::String
- (::String) — Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
#config_bucket=
def config_bucket=(value) -> ::String
- value (::String) — Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
- (::String) — Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
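Example (sketch; "my-staging-bucket" is a hypothetical name — note that the field takes a bare bucket name, not a gs:// URI):

    config.config_bucket = "my-staging-bucket"       # bucket name only
    # config.config_bucket = "gs://my-staging-bucket" # not accepted: gs:// URIs are rejected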
#dataproc_metric_config
def dataproc_metric_config() -> ::Google::Cloud::Dataproc::V1::DataprocMetricConfig
- (::Google::Cloud::Dataproc::V1::DataprocMetricConfig) — Optional. The config for Dataproc metrics.
#dataproc_metric_config=
def dataproc_metric_config=(value) -> ::Google::Cloud::Dataproc::V1::DataprocMetricConfig
- value (::Google::Cloud::Dataproc::V1::DataprocMetricConfig) — Optional. The config for Dataproc metrics.
- (::Google::Cloud::Dataproc::V1::DataprocMetricConfig) — Optional. The config for Dataproc metrics.
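Example (sketch; assumes the DataprocMetricConfig::Metric nested message and its metric_source enum, e.g. :SPARK):

    config.dataproc_metric_config = Google::Cloud::Dataproc::V1::DataprocMetricConfig.new(
      metrics: [
        Google::Cloud::Dataproc::V1::DataprocMetricConfig::Metric.new(metric_source: :SPARK)
      ]
    )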
#encryption_config
def encryption_config() -> ::Google::Cloud::Dataproc::V1::EncryptionConfig
- (::Google::Cloud::Dataproc::V1::EncryptionConfig) — Optional. Encryption settings for the cluster.
#encryption_config=
def encryption_config=(value) -> ::Google::Cloud::Dataproc::V1::EncryptionConfig
- value (::Google::Cloud::Dataproc::V1::EncryptionConfig) — Optional. Encryption settings for the cluster.
- (::Google::Cloud::Dataproc::V1::EncryptionConfig) — Optional. Encryption settings for the cluster.
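Example (sketch; the Cloud KMS key resource name is a hypothetical placeholder):

    config.encryption_config = Google::Cloud::Dataproc::V1::EncryptionConfig.new(
      gce_pd_kms_key_name: "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key"
    )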
#endpoint_config
def endpoint_config() -> ::Google::Cloud::Dataproc::V1::EndpointConfig
- (::Google::Cloud::Dataproc::V1::EndpointConfig) — Optional. Port/endpoint configuration for this cluster.
#endpoint_config=
def endpoint_config=(value) -> ::Google::Cloud::Dataproc::V1::EndpointConfig
- value (::Google::Cloud::Dataproc::V1::EndpointConfig) — Optional. Port/endpoint configuration for this cluster.
- (::Google::Cloud::Dataproc::V1::EndpointConfig) — Optional. Port/endpoint configuration for this cluster.
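Example (sketch; turns on Component Gateway HTTP port access):

    config.endpoint_config = Google::Cloud::Dataproc::V1::EndpointConfig.new(
      enable_http_port_access: true
    )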
#gce_cluster_config
def gce_cluster_config() -> ::Google::Cloud::Dataproc::V1::GceClusterConfig
- (::Google::Cloud::Dataproc::V1::GceClusterConfig) — Optional. The shared Compute Engine config settings for all instances in a cluster.
#gce_cluster_config=
def gce_cluster_config=(value) -> ::Google::Cloud::Dataproc::V1::GceClusterConfig
- value (::Google::Cloud::Dataproc::V1::GceClusterConfig) — Optional. The shared Compute Engine config settings for all instances in a cluster.
- (::Google::Cloud::Dataproc::V1::GceClusterConfig) — Optional. The shared Compute Engine config settings for all instances in a cluster.
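Example (sketch; the zone is a hypothetical choice):

    config.gce_cluster_config = Google::Cloud::Dataproc::V1::GceClusterConfig.new(
      zone_uri: "us-central1-a",
      internal_ip_only: true
    )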
#initialization_actions
def initialization_actions() -> ::Array<::Google::Cloud::Dataproc::V1::NodeInitializationAction>
- (::Array<::Google::Cloud::Dataproc::V1::NodeInitializationAction>) — Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget):

  ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
  if [[ "${ROLE}" == 'Master' ]]; then
    ... master specific actions ...
  else
    ... worker specific actions ...
  fi
#initialization_actions=
def initialization_actions=(value) -> ::Array<::Google::Cloud::Dataproc::V1::NodeInitializationAction>
- value (::Array<::Google::Cloud::Dataproc::V1::NodeInitializationAction>) — Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget):

  ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
  if [[ "${ROLE}" == 'Master' ]]; then
    ... master specific actions ...
  else
    ... worker specific actions ...
  fi

- (::Array<::Google::Cloud::Dataproc::V1::NodeInitializationAction>) — Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget):

  ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
  if [[ "${ROLE}" == 'Master' ]]; then
    ... master specific actions ...
  else
    ... worker specific actions ...
  fi
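Example (sketch; the script URI and timeout are hypothetical placeholders):

    action = Google::Cloud::Dataproc::V1::NodeInitializationAction.new(
      executable_file: "gs://my-bucket/init.sh",
      execution_timeout: Google::Protobuf::Duration.new(seconds: 600)  # 10 minutes
    )
    config.initialization_actions << action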
#lifecycle_config
def lifecycle_config() -> ::Google::Cloud::Dataproc::V1::LifecycleConfig
- (::Google::Cloud::Dataproc::V1::LifecycleConfig) — Optional. Lifecycle setting for the cluster.
#lifecycle_config=
def lifecycle_config=(value) -> ::Google::Cloud::Dataproc::V1::LifecycleConfig
- value (::Google::Cloud::Dataproc::V1::LifecycleConfig) — Optional. Lifecycle setting for the cluster.
- (::Google::Cloud::Dataproc::V1::LifecycleConfig) — Optional. Lifecycle setting for the cluster.
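Example (sketch; schedules deletion after 30 idle minutes — the TTL is a hypothetical choice):

    config.lifecycle_config = Google::Cloud::Dataproc::V1::LifecycleConfig.new(
      idle_delete_ttl: Google::Protobuf::Duration.new(seconds: 1800)
    )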
#master_config
def master_config() -> ::Google::Cloud::Dataproc::V1::InstanceGroupConfig
- (::Google::Cloud::Dataproc::V1::InstanceGroupConfig) — Optional. The Compute Engine config settings for the cluster's master instance.
#master_config=
def master_config=(value) -> ::Google::Cloud::Dataproc::V1::InstanceGroupConfig
- value (::Google::Cloud::Dataproc::V1::InstanceGroupConfig) — Optional. The Compute Engine config settings for the cluster's master instance.
- (::Google::Cloud::Dataproc::V1::InstanceGroupConfig) — Optional. The Compute Engine config settings for the cluster's master instance.
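Example (sketch; the machine type and boot disk size are hypothetical choices):

    config.master_config = Google::Cloud::Dataproc::V1::InstanceGroupConfig.new(
      num_instances: 1,
      machine_type_uri: "n1-standard-4",
      disk_config: Google::Cloud::Dataproc::V1::DiskConfig.new(boot_disk_size_gb: 100)
    )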
#metastore_config
def metastore_config() -> ::Google::Cloud::Dataproc::V1::MetastoreConfig
- (::Google::Cloud::Dataproc::V1::MetastoreConfig) — Optional. Metastore configuration.
#metastore_config=
def metastore_config=(value) -> ::Google::Cloud::Dataproc::V1::MetastoreConfig
- value (::Google::Cloud::Dataproc::V1::MetastoreConfig) — Optional. Metastore configuration.
- (::Google::Cloud::Dataproc::V1::MetastoreConfig) — Optional. Metastore configuration.
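Example (sketch; the Dataproc Metastore service resource name is a hypothetical placeholder):

    config.metastore_config = Google::Cloud::Dataproc::V1::MetastoreConfig.new(
      dataproc_metastore_service: "projects/my-project/locations/us-central1/services/my-metastore"
    )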
#secondary_worker_config
def secondary_worker_config() -> ::Google::Cloud::Dataproc::V1::InstanceGroupConfig
- (::Google::Cloud::Dataproc::V1::InstanceGroupConfig) — Optional. The Compute Engine config settings for a cluster's secondary worker instances.
#secondary_worker_config=
def secondary_worker_config=(value) -> ::Google::Cloud::Dataproc::V1::InstanceGroupConfig
- value (::Google::Cloud::Dataproc::V1::InstanceGroupConfig) — Optional. The Compute Engine config settings for a cluster's secondary worker instances.
- (::Google::Cloud::Dataproc::V1::InstanceGroupConfig) — Optional. The Compute Engine config settings for a cluster's secondary worker instances.
#security_config
def security_config() -> ::Google::Cloud::Dataproc::V1::SecurityConfig
- (::Google::Cloud::Dataproc::V1::SecurityConfig) — Optional. Security settings for the cluster.
#security_config=
def security_config=(value) -> ::Google::Cloud::Dataproc::V1::SecurityConfig
- value (::Google::Cloud::Dataproc::V1::SecurityConfig) — Optional. Security settings for the cluster.
- (::Google::Cloud::Dataproc::V1::SecurityConfig) — Optional. Security settings for the cluster.
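Example (sketch; a working Kerberos setup needs additional KerberosConfig fields, such as the root principal password and KMS key URIs, which are omitted here):

    config.security_config = Google::Cloud::Dataproc::V1::SecurityConfig.new(
      kerberos_config: Google::Cloud::Dataproc::V1::KerberosConfig.new(enable_kerberos: true)
    )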
#software_config
def software_config() -> ::Google::Cloud::Dataproc::V1::SoftwareConfig
- (::Google::Cloud::Dataproc::V1::SoftwareConfig) — Optional. The config settings for cluster software.
#software_config=
def software_config=(value) -> ::Google::Cloud::Dataproc::V1::SoftwareConfig
- value (::Google::Cloud::Dataproc::V1::SoftwareConfig) — Optional. The config settings for cluster software.
- (::Google::Cloud::Dataproc::V1::SoftwareConfig) — Optional. The config settings for cluster software.
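Example (sketch; the image version and property value are hypothetical choices — cluster properties use the "prefix:property" key format):

    config.software_config = Google::Cloud::Dataproc::V1::SoftwareConfig.new(
      image_version: "2.1",
      properties: { "spark:spark.executor.memory" => "4g" }
    )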
#temp_bucket
def temp_bucket() -> ::String
- (::String) — Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
#temp_bucket=
def temp_bucket=(value) -> ::String
- value (::String) — Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
- (::String) — Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
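Example (sketch; "my-temp-bucket" is a hypothetical name — again, a bare bucket name, not a gs:// URI):

    config.temp_bucket = "my-temp-bucket"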
#worker_config
def worker_config() -> ::Google::Cloud::Dataproc::V1::InstanceGroupConfig
- (::Google::Cloud::Dataproc::V1::InstanceGroupConfig) — Optional. The Compute Engine config settings for the cluster's worker instances.
#worker_config=
def worker_config=(value) -> ::Google::Cloud::Dataproc::V1::InstanceGroupConfig
- value (::Google::Cloud::Dataproc::V1::InstanceGroupConfig) — Optional. The Compute Engine config settings for the cluster's worker instances.
- (::Google::Cloud::Dataproc::V1::InstanceGroupConfig) — Optional. The Compute Engine config settings for the cluster's worker instances.
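Example (sketch; pairs primary workers with preemptible secondary workers — the counts and machine type are hypothetical choices):

    config.worker_config = Google::Cloud::Dataproc::V1::InstanceGroupConfig.new(
      num_instances: 2,
      machine_type_uri: "n1-standard-4"
    )
    config.secondary_worker_config = Google::Cloud::Dataproc::V1::InstanceGroupConfig.new(
      num_instances: 2,
      preemptibility: :PREEMPTIBLE
    )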