Reference documentation and code samples for the Dataflow V1beta3 API class Google::Cloud::Dataflow::V1beta3::WorkerPool.
Describes one particular pool of Cloud Dataflow workers to be instantiated by the Cloud Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
Inherits
- Object
Extended By
- Google::Protobuf::MessageExts::ClassMethods
Includes
- Google::Protobuf::MessageExts
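For orientation, here is a minimal sketch of building a WorkerPool message with the standard protobuf-generated keyword-argument constructor; every value below is an illustrative placeholder, not a recommended configuration.

    require "google/cloud/dataflow/v1beta3"

    # Describe a worker pool; unset fields fall back to service defaults.
    pool = Google::Cloud::Dataflow::V1beta3::WorkerPool.new(
      kind:         "harness",         # see #kind below
      machine_type: "n1-standard-4",   # placeholder machine type
      num_workers:  3,                 # placeholder; 0 lets the service choose
      zone:         "us-central1-a"    # placeholder zone
    )

The examples below reuse this pool variable.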
Methods
#autoscaling_settings
def autoscaling_settings() -> ::Google::Cloud::Dataflow::V1beta3::AutoscalingSettings
- (::Google::Cloud::Dataflow::V1beta3::AutoscalingSettings) — Settings for autoscaling of this WorkerPool.
#autoscaling_settings=
def autoscaling_settings=(value) -> ::Google::Cloud::Dataflow::V1beta3::AutoscalingSettings
- value (::Google::Cloud::Dataflow::V1beta3::AutoscalingSettings) — Settings for autoscaling of this WorkerPool.
- (::Google::Cloud::Dataflow::V1beta3::AutoscalingSettings) — Settings for autoscaling of this WorkerPool.
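A minimal sketch of enabling basic autoscaling, assuming the AutoscalingSettings fields algorithm and max_num_workers; the bound is a placeholder.

    pool.autoscaling_settings = Google::Cloud::Dataflow::V1beta3::AutoscalingSettings.new(
      algorithm:       :AUTOSCALING_ALGORITHM_BASIC,  # standard autoscaling algorithm
      max_num_workers: 10                             # placeholder upper bound
    )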
#data_disks
def data_disks() -> ::Array<::Google::Cloud::Dataflow::V1beta3::Disk>
- (::Array<::Google::Cloud::Dataflow::V1beta3::Disk>) — Data disks that are used by a VM in this workflow.
#data_disks=
def data_disks=(value) -> ::Array<::Google::Cloud::Dataflow::V1beta3::Disk>
- value (::Array<::Google::Cloud::Dataflow::V1beta3::Disk>) — Data disks that are used by a VM in this workflow.
- (::Array<::Google::Cloud::Dataflow::V1beta3::Disk>) — Data disks that are used by a VM in this workflow.
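A sketch of adding one data disk, assuming the Disk message's size_gb, disk_type, and mount_point fields; the getter returns a mutable repeated field, so << appends in place.

    pool.data_disks << Google::Cloud::Dataflow::V1beta3::Disk.new(
      size_gb:     50,          # placeholder size in GB
      disk_type:   "pd-ssd",    # placeholder disk type
      mount_point: "/mnt/data"  # placeholder mount point
    )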
#default_package_set
def default_package_set() -> ::Google::Cloud::Dataflow::V1beta3::DefaultPackageSet
- (::Google::Cloud::Dataflow::V1beta3::DefaultPackageSet) — The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
#default_package_set=
def default_package_set=(value) -> ::Google::Cloud::Dataflow::V1beta3::DefaultPackageSet
- value (::Google::Cloud::Dataflow::V1beta3::DefaultPackageSet) — The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
- (::Google::Cloud::Dataflow::V1beta3::DefaultPackageSet) — The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
#disk_size_gb
def disk_size_gb() -> ::Integer
- (::Integer) — Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
#disk_size_gb=
def disk_size_gb=(value) -> ::Integer
- value (::Integer) — Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
- (::Integer) — Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
#disk_source_image
def disk_source_image() -> ::String
- (::String) — Fully qualified source image for disks.
#disk_source_image=
def disk_source_image=(value) -> ::String
- value (::String) — Fully qualified source image for disks.
- (::String) — Fully qualified source image for disks.
#disk_type
def disk_type() -> ::String
- (::String) — Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
#disk_type=
def disk_type=(value) -> ::String
- value (::String) — Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
- (::String) — Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
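The three root-disk fields above can be set directly; all values here are placeholders (the image path is hypothetical), and zero/empty values defer to the service defaults as noted.

    pool.disk_size_gb      = 100       # placeholder; 0 lets the service choose
    pool.disk_type         = "pd-ssd"  # placeholder; "" lets the service choose
    pool.disk_source_image = "projects/my-project/global/images/my-image"  # hypothetical image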
#ip_configuration
def ip_configuration() -> ::Google::Cloud::Dataflow::V1beta3::WorkerIPAddressConfiguration
- (::Google::Cloud::Dataflow::V1beta3::WorkerIPAddressConfiguration) — Configuration for VM IPs.
#ip_configuration=
def ip_configuration=(value) -> ::Google::Cloud::Dataflow::V1beta3::WorkerIPAddressConfiguration
- value (::Google::Cloud::Dataflow::V1beta3::WorkerIPAddressConfiguration) — Configuration for VM IPs.
- (::Google::Cloud::Dataflow::V1beta3::WorkerIPAddressConfiguration) — Configuration for VM IPs.
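WorkerIPAddressConfiguration is an enum; for example, restricting workers to internal IP addresses:

    pool.ip_configuration = :WORKER_IP_PRIVATE  # workers receive internal IPs only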
#kind
def kind() -> ::String
- (::String) — The kind of the worker pool; currently only "harness" and "shuffle" are supported.
#kind=
def kind=(value) -> ::String
- value (::String) — The kind of the worker pool; currently only "harness" and "shuffle" are supported.
- (::String) — The kind of the worker pool; currently only "harness" and "shuffle" are supported.
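For example, declaring a standard harness pool (a plain string field):

    pool.kind = "harness"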
#machine_type
def machine_type() -> ::String
- (::String) — Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
#machine_type=
def machine_type=(value) -> ::String
- value (::String) — Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
- (::String) — Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
#metadata
def metadata() -> ::Google::Protobuf::Map{::String => ::String}
- (::Google::Protobuf::Map{::String => ::String}) — Metadata to set on the Google Compute Engine VMs.
#metadata=
def metadata=(value) -> ::Google::Protobuf::Map{::String => ::String}
- value (::Google::Protobuf::Map{::String => ::String}) — Metadata to set on the Google Compute Engine VMs.
- (::Google::Protobuf::Map{::String => ::String}) — Metadata to set on the Google Compute Engine VMs.
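The getter returns a mutable Google::Protobuf::Map, so entries can be set in place; the keys and values here are hypothetical.

    pool.metadata["startup-script-url"] = "gs://my-bucket/startup.sh"  # hypothetical entry
    pool.metadata["team"]               = "data-eng"                   # hypothetical entry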
#network
def network() -> ::String
- (::String) — Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
#network=
def network=(value) -> ::String
- value (::String) — Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- (::String) — Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
#num_threads_per_worker
def num_threads_per_worker() -> ::Integer
- (::Integer) — The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
#num_threads_per_worker=
def num_threads_per_worker=(value) -> ::Integer
- value (::Integer) — The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
- (::Integer) — The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
#num_workers
def num_workers() -> ::Integer
- (::Integer) — Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
#num_workers=
def num_workers=(value) -> ::Integer
- value (::Integer) — Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
- (::Integer) — Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
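For a fixed-size pool (no autoscaling), set the worker count directly:

    pool.num_workers = 5  # placeholder; 0 or unset lets the service choose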
#on_host_maintenance
def on_host_maintenance() -> ::String
- (::String) — The action to take on host maintenance, as defined by the Google Compute Engine API.
#on_host_maintenance=
def on_host_maintenance=(value) -> ::String
- value (::String) — The action to take on host maintenance, as defined by the Google Compute Engine API.
- (::String) — The action to take on host maintenance, as defined by the Google Compute Engine API.
#packages
def packages() -> ::Array<::Google::Cloud::Dataflow::V1beta3::Package>
- (::Array<::Google::Cloud::Dataflow::V1beta3::Package>) — Packages to be installed on workers.
#packages=
def packages=(value) -> ::Array<::Google::Cloud::Dataflow::V1beta3::Package>
- value (::Array<::Google::Cloud::Dataflow::V1beta3::Package>) — Packages to be installed on workers.
- (::Array<::Google::Cloud::Dataflow::V1beta3::Package>) — Packages to be installed on workers.
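A sketch of staging one package onto the workers, assuming the Package message's name and location fields; the bucket path is hypothetical.

    pool.packages << Google::Cloud::Dataflow::V1beta3::Package.new(
      name:     "my_deps.tar.gz",                        # hypothetical package name
      location: "gs://my-staging-bucket/my_deps.tar.gz"  # hypothetical staging location
    )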
#pool_args
def pool_args() -> ::Google::Protobuf::Any
- (::Google::Protobuf::Any) — Extra arguments for this worker pool.
#pool_args=
def pool_args=(value) -> ::Google::Protobuf::Any
- value (::Google::Protobuf::Any) — Extra arguments for this worker pool.
- (::Google::Protobuf::Any) — Extra arguments for this worker pool.
#sdk_harness_container_images
def sdk_harness_container_images() -> ::Array<::Google::Cloud::Dataflow::V1beta3::SdkHarnessContainerImage>
- (::Array<::Google::Cloud::Dataflow::V1beta3::SdkHarnessContainerImage>) — Set of SDK harness containers needed to execute this pipeline. This will only be set in the Fn API path. For non-cross-language pipelines this should have only one entry. Cross-language pipelines will have two or more entries.
#sdk_harness_container_images=
def sdk_harness_container_images=(value) -> ::Array<::Google::Cloud::Dataflow::V1beta3::SdkHarnessContainerImage>
- value (::Array<::Google::Cloud::Dataflow::V1beta3::SdkHarnessContainerImage>) — Set of SDK harness containers needed to execute this pipeline. This will only be set in the Fn API path. For non-cross-language pipelines this should have only one entry. Cross-language pipelines will have two or more entries.
- (::Array<::Google::Cloud::Dataflow::V1beta3::SdkHarnessContainerImage>) — Set of SDK harness containers needed to execute this pipeline. This will only be set in the Fn API path. For non-cross-language pipelines this should have only one entry. Cross-language pipelines will have two or more entries.
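For Fn API pipelines, a sketch of listing one SDK harness image, assuming the SdkHarnessContainerImage fields container_image and use_single_core_per_container; the image URI is hypothetical.

    pool.sdk_harness_container_images << Google::Cloud::Dataflow::V1beta3::SdkHarnessContainerImage.new(
      container_image:               "gcr.io/my-project/beam-python-sdk:latest",  # hypothetical image
      use_single_core_per_container: false
    )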
#subnetwork
def subnetwork() -> ::String
- (::String) — Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
#subnetwork=
def subnetwork=(value) -> ::String
- value (::String) — Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
- (::String) — Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
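Network and subnetwork are typically set together; the names are hypothetical, and the subnetwork string follows the documented form.

    pool.network    = "my-vpc"                                     # hypothetical network name
    pool.subnetwork = "regions/us-central1/subnetworks/my-subnet"  # documented form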
#taskrunner_settings
def taskrunner_settings() -> ::Google::Cloud::Dataflow::V1beta3::TaskRunnerSettings
- (::Google::Cloud::Dataflow::V1beta3::TaskRunnerSettings) — Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
#taskrunner_settings=
def taskrunner_settings=(value) -> ::Google::Cloud::Dataflow::V1beta3::TaskRunnerSettings
- value (::Google::Cloud::Dataflow::V1beta3::TaskRunnerSettings) — Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
- (::Google::Cloud::Dataflow::V1beta3::TaskRunnerSettings) — Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
#teardown_policy
def teardown_policy() -> ::Google::Cloud::Dataflow::V1beta3::TeardownPolicy
- (::Google::Cloud::Dataflow::V1beta3::TeardownPolicy) — Sets the policy for determining when to tear down the worker pool. Allowed values are TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
#teardown_policy=
def teardown_policy=(value) -> ::Google::Cloud::Dataflow::V1beta3::TeardownPolicy
-
value (::Google::Cloud::Dataflow::V1beta3::TeardownPolicy) — Sets the policy for determining when to turndown worker pool.
Allowed values are:
TEARDOWN_ALWAYS
,TEARDOWN_ON_SUCCESS
, andTEARDOWN_NEVER
.TEARDOWN_ALWAYS
means workers are always torn down regardless of whether the job succeeds.TEARDOWN_ON_SUCCESS
means workers are torn down if the job succeeds.TEARDOWN_NEVER
means the workers are never torn down.If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the
TEARDOWN_ALWAYS
policy except for small, manually supervised test jobs.If unknown or unspecified, the service will attempt to choose a reasonable default.
- (::Google::Cloud::Dataflow::V1beta3::TeardownPolicy) — Sets the policy for determining when to tear down the worker pool. Allowed values are TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
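TeardownPolicy is an enum; following the recommendation above:

    pool.teardown_policy = :TEARDOWN_ALWAYS  # recommended except for supervised test jobs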
#worker_harness_container_image
def worker_harness_container_image() -> ::String
- (::String) — Required. Docker container image that executes the Cloud Dataflow worker harness, residing in Google Container Registry. Deprecated for the Fn API path. Use sdk_harness_container_images instead.
#worker_harness_container_image=
def worker_harness_container_image=(value) -> ::String
- value (::String) — Required. Docker container image that executes the Cloud Dataflow worker harness, residing in Google Container Registry. Deprecated for the Fn API path. Use sdk_harness_container_images instead.
- (::String) — Required. Docker container image that executes the Cloud Dataflow worker harness, residing in Google Container Registry. Deprecated for the Fn API path. Use sdk_harness_container_images instead.
#zone
def zone() -> ::String
- (::String) — Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
#zone=
def zone=(value) -> ::String
- value (::String) — Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
- (::String) — Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.