Package cloud.google.com/go/dataproc/apiv1/dataprocpb (v1.9.0)

Variables

Batch_State_name, Batch_State_value

var (
	Batch_State_name = map[int32]string{
		0: "STATE_UNSPECIFIED",
		1: "PENDING",
		2: "RUNNING",
		3: "CANCELLING",
		4: "CANCELLED",
		5: "SUCCEEDED",
		6: "FAILED",
	}
	Batch_State_value = map[string]int32{
		"STATE_UNSPECIFIED": 0,
		"PENDING":           1,
		"RUNNING":           2,
		"CANCELLING":        3,
		"CANCELLED":         4,
		"SUCCEEDED":         5,
		"FAILED":            6,
	}
)

Enum value maps for Batch_State.
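
These generated maps are convenient for converting between an enum's numeric wire value and its proto name, for example when parsing state strings from logs. A minimal sketch (the inputs are illustrative):

package main

import (
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	// Name -> number, e.g. when parsing a state string from a log line.
	if n, ok := dataprocpb.Batch_State_value["RUNNING"]; ok {
		fmt.Println("RUNNING =", n) // RUNNING = 2
	}

	// Number -> name, e.g. when rendering a raw enum value.
	fmt.Println(dataprocpb.Batch_State_name[5]) // SUCCEEDED

	// The typed constant's String method gives the same name.
	fmt.Println(dataprocpb.Batch_SUCCEEDED.String()) // SUCCEEDED
}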

GceClusterConfig_PrivateIpv6GoogleAccess_name, GceClusterConfig_PrivateIpv6GoogleAccess_value

var (
	GceClusterConfig_PrivateIpv6GoogleAccess_name = map[int32]string{
		0: "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED",
		1: "INHERIT_FROM_SUBNETWORK",
		2: "OUTBOUND",
		3: "BIDIRECTIONAL",
	}
	GceClusterConfig_PrivateIpv6GoogleAccess_value = map[string]int32{
		"PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED": 0,
		"INHERIT_FROM_SUBNETWORK":                1,
		"OUTBOUND":                               2,
		"BIDIRECTIONAL":                          3,
	}
)

Enum value maps for GceClusterConfig_PrivateIpv6GoogleAccess.

InstanceGroupConfig_Preemptibility_name, InstanceGroupConfig_Preemptibility_value

var (
	InstanceGroupConfig_Preemptibility_name = map[int32]string{
		0: "PREEMPTIBILITY_UNSPECIFIED",
		1: "NON_PREEMPTIBLE",
		2: "PREEMPTIBLE",
	}
	InstanceGroupConfig_Preemptibility_value = map[string]int32{
		"PREEMPTIBILITY_UNSPECIFIED": 0,
		"NON_PREEMPTIBLE":            1,
		"PREEMPTIBLE":                2,
	}
)

Enum value maps for InstanceGroupConfig_Preemptibility.

NodeGroup_Role_name, NodeGroup_Role_value

var (
	NodeGroup_Role_name = map[int32]string{
		0: "ROLE_UNSPECIFIED",
		1: "DRIVER",
	}
	NodeGroup_Role_value = map[string]int32{
		"ROLE_UNSPECIFIED": 0,
		"DRIVER":           1,
	}
)

Enum value maps for NodeGroup_Role.

ClusterStatus_State_name, ClusterStatus_State_value

var (
	ClusterStatus_State_name = map[int32]string{
		0: "UNKNOWN",
		1: "CREATING",
		2: "RUNNING",
		3: "ERROR",
		9: "ERROR_DUE_TO_UPDATE",
		4: "DELETING",
		5: "UPDATING",
		6: "STOPPING",
		7: "STOPPED",
		8: "STARTING",
	}
	ClusterStatus_State_value = map[string]int32{
		"UNKNOWN":             0,
		"CREATING":            1,
		"RUNNING":             2,
		"ERROR":               3,
		"ERROR_DUE_TO_UPDATE": 9,
		"DELETING":            4,
		"UPDATING":            5,
		"STOPPING":            6,
		"STOPPED":             7,
		"STARTING":            8,
	}
)

Enum value maps for ClusterStatus_State.

ClusterStatus_Substate_name, ClusterStatus_Substate_value

var (
	ClusterStatus_Substate_name = map[int32]string{
		0: "UNSPECIFIED",
		1: "UNHEALTHY",
		2: "STALE_STATUS",
	}
	ClusterStatus_Substate_value = map[string]int32{
		"UNSPECIFIED":  0,
		"UNHEALTHY":    1,
		"STALE_STATUS": 2,
	}
)

Enum value maps for ClusterStatus_Substate.

DataprocMetricConfig_MetricSource_name, DataprocMetricConfig_MetricSource_value

var (
	DataprocMetricConfig_MetricSource_name = map[int32]string{
		0: "METRIC_SOURCE_UNSPECIFIED",
		1: "MONITORING_AGENT_DEFAULTS",
		2: "HDFS",
		3: "SPARK",
		4: "YARN",
		5: "SPARK_HISTORY_SERVER",
		6: "HIVESERVER2",
	}
	DataprocMetricConfig_MetricSource_value = map[string]int32{
		"METRIC_SOURCE_UNSPECIFIED": 0,
		"MONITORING_AGENT_DEFAULTS": 1,
		"HDFS":                      2,
		"SPARK":                     3,
		"YARN":                      4,
		"SPARK_HISTORY_SERVER":      5,
		"HIVESERVER2":               6,
	}
)

Enum value maps for DataprocMetricConfig_MetricSource.

ReservationAffinity_Type_name, ReservationAffinity_Type_value

var (
	ReservationAffinity_Type_name = map[int32]string{
		0: "TYPE_UNSPECIFIED",
		1: "NO_RESERVATION",
		2: "ANY_RESERVATION",
		3: "SPECIFIC_RESERVATION",
	}
	ReservationAffinity_Type_value = map[string]int32{
		"TYPE_UNSPECIFIED":     0,
		"NO_RESERVATION":       1,
		"ANY_RESERVATION":      2,
		"SPECIFIC_RESERVATION": 3,
	}
)

Enum value maps for ReservationAffinity_Type.

LoggingConfig_Level_name, LoggingConfig_Level_value

var (
	LoggingConfig_Level_name = map[int32]string{
		0: "LEVEL_UNSPECIFIED",
		1: "ALL",
		2: "TRACE",
		3: "DEBUG",
		4: "INFO",
		5: "WARN",
		6: "ERROR",
		7: "FATAL",
		8: "OFF",
	}
	LoggingConfig_Level_value = map[string]int32{
		"LEVEL_UNSPECIFIED": 0,
		"ALL":               1,
		"TRACE":             2,
		"DEBUG":             3,
		"INFO":              4,
		"WARN":              5,
		"ERROR":             6,
		"FATAL":             7,
		"OFF":               8,
	}
)

Enum value maps for LoggingConfig_Level.

JobStatus_State_name, JobStatus_State_value

var (
	JobStatus_State_name = map[int32]string{
		0: "STATE_UNSPECIFIED",
		1: "PENDING",
		8: "SETUP_DONE",
		2: "RUNNING",
		3: "CANCEL_PENDING",
		7: "CANCEL_STARTED",
		4: "CANCELLED",
		5: "DONE",
		6: "ERROR",
		9: "ATTEMPT_FAILURE",
	}
	JobStatus_State_value = map[string]int32{
		"STATE_UNSPECIFIED": 0,
		"PENDING":           1,
		"SETUP_DONE":        8,
		"RUNNING":           2,
		"CANCEL_PENDING":    3,
		"CANCEL_STARTED":    7,
		"CANCELLED":         4,
		"DONE":              5,
		"ERROR":             6,
		"ATTEMPT_FAILURE":   9,
	}
)

Enum value maps for JobStatus_State.

JobStatus_Substate_name, JobStatus_Substate_value

var (
	JobStatus_Substate_name = map[int32]string{
		0: "UNSPECIFIED",
		1: "SUBMITTED",
		2: "QUEUED",
		3: "STALE_STATUS",
	}
	JobStatus_Substate_value = map[string]int32{
		"UNSPECIFIED":  0,
		"SUBMITTED":    1,
		"QUEUED":       2,
		"STALE_STATUS": 3,
	}
)

Enum value maps for JobStatus_Substate.

YarnApplication_State_name, YarnApplication_State_value

var (
	YarnApplication_State_name = map[int32]string{
		0: "STATE_UNSPECIFIED",
		1: "NEW",
		2: "NEW_SAVING",
		3: "SUBMITTED",
		4: "ACCEPTED",
		5: "RUNNING",
		6: "FINISHED",
		7: "FAILED",
		8: "KILLED",
	}
	YarnApplication_State_value = map[string]int32{
		"STATE_UNSPECIFIED": 0,
		"NEW":               1,
		"NEW_SAVING":        2,
		"SUBMITTED":         3,
		"ACCEPTED":          4,
		"RUNNING":           5,
		"FINISHED":          6,
		"FAILED":            7,
		"KILLED":            8,
	}
)

Enum value maps for YarnApplication_State.

ListJobsRequest_JobStateMatcher_name, ListJobsRequest_JobStateMatcher_value

var (
	ListJobsRequest_JobStateMatcher_name = map[int32]string{
		0: "ALL",
		1: "ACTIVE",
		2: "NON_ACTIVE",
	}
	ListJobsRequest_JobStateMatcher_value = map[string]int32{
		"ALL":        0,
		"ACTIVE":     1,
		"NON_ACTIVE": 2,
	}
)

Enum value maps for ListJobsRequest_JobStateMatcher.

BatchOperationMetadata_BatchOperationType_name, BatchOperationMetadata_BatchOperationType_value

var (
	BatchOperationMetadata_BatchOperationType_name = map[int32]string{
		0: "BATCH_OPERATION_TYPE_UNSPECIFIED",
		1: "BATCH",
	}
	BatchOperationMetadata_BatchOperationType_value = map[string]int32{
		"BATCH_OPERATION_TYPE_UNSPECIFIED": 0,
		"BATCH":                            1,
	}
)

Enum value maps for BatchOperationMetadata_BatchOperationType.

ClusterOperationStatus_State_name, ClusterOperationStatus_State_value

var (
	ClusterOperationStatus_State_name = map[int32]string{
		0: "UNKNOWN",
		1: "PENDING",
		2: "RUNNING",
		3: "DONE",
	}
	ClusterOperationStatus_State_value = map[string]int32{
		"UNKNOWN": 0,
		"PENDING": 1,
		"RUNNING": 2,
		"DONE":    3,
	}
)

Enum value maps for ClusterOperationStatus_State.

NodeGroupOperationMetadata_NodeGroupOperationType_name, NodeGroupOperationMetadata_NodeGroupOperationType_value

var (
	NodeGroupOperationMetadata_NodeGroupOperationType_name = map[int32]string{
		0: "NODE_GROUP_OPERATION_TYPE_UNSPECIFIED",
		1: "CREATE",
		2: "UPDATE",
		3: "DELETE",
		4: "RESIZE",
	}
	NodeGroupOperationMetadata_NodeGroupOperationType_value = map[string]int32{
		"NODE_GROUP_OPERATION_TYPE_UNSPECIFIED": 0,
		"CREATE":                                1,
		"UPDATE":                                2,
		"DELETE":                                3,
		"RESIZE":                                4,
	}
)

Enum value maps for NodeGroupOperationMetadata_NodeGroupOperationType.

Component_name, Component_value

var (
	Component_name = map[int32]string{
		0:  "COMPONENT_UNSPECIFIED",
		5:  "ANACONDA",
		13: "DOCKER",
		9:  "DRUID",
		14: "FLINK",
		11: "HBASE",
		3:  "HIVE_WEBHCAT",
		1:  "JUPYTER",
		6:  "PRESTO",
		12: "RANGER",
		10: "SOLR",
		4:  "ZEPPELIN",
		8:  "ZOOKEEPER",
	}
	Component_value = map[string]int32{
		"COMPONENT_UNSPECIFIED": 0,
		"ANACONDA":              5,
		"DOCKER":                13,
		"DRUID":                 9,
		"FLINK":                 14,
		"HBASE":                 11,
		"HIVE_WEBHCAT":          3,
		"JUPYTER":               1,
		"PRESTO":                6,
		"RANGER":                12,
		"SOLR":                  10,
		"ZEPPELIN":              4,
		"ZOOKEEPER":             8,
	}
)

Enum value maps for Component.

FailureAction_name, FailureAction_value

var (
	FailureAction_name = map[int32]string{
		0: "FAILURE_ACTION_UNSPECIFIED",
		1: "NO_ACTION",
		2: "DELETE",
	}
	FailureAction_value = map[string]int32{
		"FAILURE_ACTION_UNSPECIFIED": 0,
		"NO_ACTION":                  1,
		"DELETE":                     2,
	}
)

Enum value maps for FailureAction.

GkeNodePoolTarget_Role_name, GkeNodePoolTarget_Role_value

var (
	GkeNodePoolTarget_Role_name = map[int32]string{
		0: "ROLE_UNSPECIFIED",
		1: "DEFAULT",
		2: "CONTROLLER",
		3: "SPARK_DRIVER",
		4: "SPARK_EXECUTOR",
	}
	GkeNodePoolTarget_Role_value = map[string]int32{
		"ROLE_UNSPECIFIED": 0,
		"DEFAULT":          1,
		"CONTROLLER":       2,
		"SPARK_DRIVER":     3,
		"SPARK_EXECUTOR":   4,
	}
)

Enum value maps for GkeNodePoolTarget_Role.

WorkflowMetadata_State_name, WorkflowMetadata_State_value

var (
	WorkflowMetadata_State_name = map[int32]string{
		0: "UNKNOWN",
		1: "PENDING",
		2: "RUNNING",
		3: "DONE",
	}
	WorkflowMetadata_State_value = map[string]int32{
		"UNKNOWN": 0,
		"PENDING": 1,
		"RUNNING": 2,
		"DONE":    3,
	}
)

Enum value maps for WorkflowMetadata_State.

WorkflowNode_NodeState_name, WorkflowNode_NodeState_value

var (
	WorkflowNode_NodeState_name = map[int32]string{
		0: "NODE_STATE_UNSPECIFIED",
		1: "BLOCKED",
		2: "RUNNABLE",
		3: "RUNNING",
		4: "COMPLETED",
		5: "FAILED",
	}
	WorkflowNode_NodeState_value = map[string]int32{
		"NODE_STATE_UNSPECIFIED": 0,
		"BLOCKED":                1,
		"RUNNABLE":               2,
		"RUNNING":                3,
		"COMPLETED":              4,
		"FAILED":                 5,
	}
)

Enum value maps for WorkflowNode_NodeState.

File_google_cloud_dataproc_v1_autoscaling_policies_proto

var File_google_cloud_dataproc_v1_autoscaling_policies_proto protoreflect.FileDescriptor

File_google_cloud_dataproc_v1_batches_proto

var File_google_cloud_dataproc_v1_batches_proto protoreflect.FileDescriptor

File_google_cloud_dataproc_v1_clusters_proto

var File_google_cloud_dataproc_v1_clusters_proto protoreflect.FileDescriptor

File_google_cloud_dataproc_v1_jobs_proto

var File_google_cloud_dataproc_v1_jobs_proto protoreflect.FileDescriptor

File_google_cloud_dataproc_v1_node_groups_proto

var File_google_cloud_dataproc_v1_node_groups_proto protoreflect.FileDescriptor

File_google_cloud_dataproc_v1_operations_proto

var File_google_cloud_dataproc_v1_operations_proto protoreflect.FileDescriptor

File_google_cloud_dataproc_v1_shared_proto

var File_google_cloud_dataproc_v1_shared_proto protoreflect.FileDescriptor

File_google_cloud_dataproc_v1_workflow_templates_proto

var File_google_cloud_dataproc_v1_workflow_templates_proto protoreflect.FileDescriptor

Functions

func RegisterAutoscalingPolicyServiceServer

func RegisterAutoscalingPolicyServiceServer(s *grpc.Server, srv AutoscalingPolicyServiceServer)

func RegisterBatchControllerServer

func RegisterBatchControllerServer(s *grpc.Server, srv BatchControllerServer)

func RegisterClusterControllerServer

func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer)

func RegisterJobControllerServer

func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer)

func RegisterNodeGroupControllerServer

func RegisterNodeGroupControllerServer(s *grpc.Server, srv NodeGroupControllerServer)

func RegisterWorkflowTemplateServiceServer

func RegisterWorkflowTemplateServiceServer(s *grpc.Server, srv WorkflowTemplateServiceServer)
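
The Register functions wire a service implementation into a *grpc.Server. Below is a minimal sketch for BatchController, assuming the UnimplementedBatchControllerServer helper that accompanies these generated stubs; the listener address and the returned Batch are placeholders for a test double, not a real Dataproc backend.

package main

import (
	"context"
	"log"
	"net"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/grpc"
)

// fakeBatchController overrides only GetBatch; the embedded helper answers
// every other RPC with an Unimplemented error.
type fakeBatchController struct {
	dataprocpb.UnimplementedBatchControllerServer
}

func (*fakeBatchController) GetBatch(ctx context.Context, req *dataprocpb.GetBatchRequest) (*dataprocpb.Batch, error) {
	return &dataprocpb.Batch{Name: req.GetName(), State: dataprocpb.Batch_SUCCEEDED}, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:0") // arbitrary local test address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	dataprocpb.RegisterBatchControllerServer(s, &fakeBatchController{})
	log.Printf("fake BatchController serving on %s", lis.Addr())
	if err := s.Serve(lis); err != nil {
		log.Fatal(err)
	}
}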

AcceleratorConfig

type AcceleratorConfig struct {

	// Full URL, partial URI, or short name of the accelerator type resource to
	// expose to this instance. See
	// [Compute Engine
	// AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).
	//
	// Examples:
	//
	// * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `nvidia-tesla-k80`
	//
	// **Auto Zone Exception**: If you are using the Dataproc
	// [Auto Zone
	// Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
	// feature, you must use the short name of the accelerator type
	// resource, for example, `nvidia-tesla-k80`.
	AcceleratorTypeUri string `protobuf:"bytes,1,opt,name=accelerator_type_uri,json=acceleratorTypeUri,proto3" json:"accelerator_type_uri,omitempty"`
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount int32 `protobuf:"varint,2,opt,name=accelerator_count,json=acceleratorCount,proto3" json:"accelerator_count,omitempty"`
	// contains filtered or unexported fields
}

Specifies the type and number of accelerator cards attached to the instances of an instance group. See GPUs on Compute Engine.
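
For illustration, a config that attaches two GPUs per instance using the short accelerator-type name, which is the form required with Auto Zone Placement (the accelerator model is just an example):

package example

import "cloud.google.com/go/dataproc/apiv1/dataprocpb"

// twoK80s returns a config that attaches two NVIDIA K80s per instance,
// using the short type name so it also works with Auto Zone Placement.
func twoK80s() *dataprocpb.AcceleratorConfig {
	return &dataprocpb.AcceleratorConfig{
		AcceleratorTypeUri: "nvidia-tesla-k80",
		AcceleratorCount:   2,
	}
}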

func (*AcceleratorConfig) Descriptor

func (*AcceleratorConfig) Descriptor() ([]byte, []int)

Deprecated: Use AcceleratorConfig.ProtoReflect.Descriptor instead.

func (*AcceleratorConfig) GetAcceleratorCount

func (x *AcceleratorConfig) GetAcceleratorCount() int32

func (*AcceleratorConfig) GetAcceleratorTypeUri

func (x *AcceleratorConfig) GetAcceleratorTypeUri() string

func (*AcceleratorConfig) ProtoMessage

func (*AcceleratorConfig) ProtoMessage()

func (*AcceleratorConfig) ProtoReflect

func (x *AcceleratorConfig) ProtoReflect() protoreflect.Message

func (*AcceleratorConfig) Reset

func (x *AcceleratorConfig) Reset()

func (*AcceleratorConfig) String

func (x *AcceleratorConfig) String() string

AutoscalingConfig

type AutoscalingConfig struct {

	// Optional. The autoscaling policy used by the cluster.
	//
	// Only resource names including projectid and location (region) are valid.
	// Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`
	// * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`
	//
	// Note that the policy must be in the same project and Dataproc region.
	PolicyUri string `protobuf:"bytes,1,opt,name=policy_uri,json=policyUri,proto3" json:"policy_uri,omitempty"`
	// contains filtered or unexported fields
}

Autoscaling Policy config associated with the cluster.
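
A minimal sketch using the short resource-name form described above; the project, region, and policy ID are placeholders:

package example

import "cloud.google.com/go/dataproc/apiv1/dataprocpb"

// autoscaling points a cluster at an existing policy in the same
// project and Dataproc region (placeholder IDs).
func autoscaling() *dataprocpb.AutoscalingConfig {
	return &dataprocpb.AutoscalingConfig{
		PolicyUri: "projects/my-project/locations/us-central1/autoscalingPolicies/my-policy",
	}
}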

func (*AutoscalingConfig) Descriptor

func (*AutoscalingConfig) Descriptor() ([]byte, []int)

Deprecated: Use AutoscalingConfig.ProtoReflect.Descriptor instead.

func (*AutoscalingConfig) GetPolicyUri

func (x *AutoscalingConfig) GetPolicyUri() string

func (*AutoscalingConfig) ProtoMessage

func (*AutoscalingConfig) ProtoMessage()

func (*AutoscalingConfig) ProtoReflect

func (x *AutoscalingConfig) ProtoReflect() protoreflect.Message

func (*AutoscalingConfig) Reset

func (x *AutoscalingConfig) Reset()

func (*AutoscalingConfig) String

func (x *AutoscalingConfig) String() string

AutoscalingPolicy

type AutoscalingPolicy struct {
	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`

	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`

	Algorithm isAutoscalingPolicy_Algorithm `protobuf_oneof:"algorithm"`

	WorkerConfig *InstanceGroupAutoscalingPolicyConfig `protobuf:"bytes,4,opt,name=worker_config,json=workerConfig,proto3" json:"worker_config,omitempty"`

	SecondaryWorkerConfig *InstanceGroupAutoscalingPolicyConfig `protobuf:"bytes,5,opt,name=secondary_worker_config,json=secondaryWorkerConfig,proto3" json:"secondary_worker_config,omitempty"`

	Labels map[string]string "" /* 153 byte string literal not displayed */

}

Describes an autoscaling policy for the Dataproc cluster autoscaler.

func (*AutoscalingPolicy) Descriptor

func (*AutoscalingPolicy) Descriptor() ([]byte, []int)

Deprecated: Use AutoscalingPolicy.ProtoReflect.Descriptor instead.

func (*AutoscalingPolicy) GetAlgorithm

func (m *AutoscalingPolicy) GetAlgorithm() isAutoscalingPolicy_Algorithm

func (*AutoscalingPolicy) GetBasicAlgorithm

func (x *AutoscalingPolicy) GetBasicAlgorithm() *BasicAutoscalingAlgorithm

func (*AutoscalingPolicy) GetId

func (x *AutoscalingPolicy) GetId() string

func (*AutoscalingPolicy) GetLabels

func (x *AutoscalingPolicy) GetLabels() map[string]string

func (*AutoscalingPolicy) GetName

func (x *AutoscalingPolicy) GetName() string

func (*AutoscalingPolicy) GetSecondaryWorkerConfig

func (x *AutoscalingPolicy) GetSecondaryWorkerConfig() *InstanceGroupAutoscalingPolicyConfig

func (*AutoscalingPolicy) GetWorkerConfig

func (*AutoscalingPolicy) ProtoMessage

func (*AutoscalingPolicy) ProtoMessage()

func (*AutoscalingPolicy) ProtoReflect

func (x *AutoscalingPolicy) ProtoReflect() protoreflect.Message

func (*AutoscalingPolicy) Reset

func (x *AutoscalingPolicy) Reset()

func (*AutoscalingPolicy) String

func (x *AutoscalingPolicy) String() string

AutoscalingPolicyServiceClient

type AutoscalingPolicyServiceClient interface {
	// Creates new autoscaling policy.
	CreateAutoscalingPolicy(ctx context.Context, in *CreateAutoscalingPolicyRequest, opts ...grpc.CallOption) (*AutoscalingPolicy, error)
	// Updates (replaces) autoscaling policy.
	//
	// Disabled check for update_mask, because all updates will be full
	// replacements.
	UpdateAutoscalingPolicy(ctx context.Context, in *UpdateAutoscalingPolicyRequest, opts ...grpc.CallOption) (*AutoscalingPolicy, error)
	// Retrieves autoscaling policy.
	GetAutoscalingPolicy(ctx context.Context, in *GetAutoscalingPolicyRequest, opts ...grpc.CallOption) (*AutoscalingPolicy, error)
	// Lists autoscaling policies in the project.
	ListAutoscalingPolicies(ctx context.Context, in *ListAutoscalingPoliciesRequest, opts ...grpc.CallOption) (*ListAutoscalingPoliciesResponse, error)
	// Deletes an autoscaling policy. It is an error to delete an autoscaling
	// policy that is in use by one or more clusters.
	DeleteAutoscalingPolicy(ctx context.Context, in *DeleteAutoscalingPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}

AutoscalingPolicyServiceClient is the client API for AutoscalingPolicyService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewAutoscalingPolicyServiceClient

func NewAutoscalingPolicyServiceClient(cc grpc.ClientConnInterface) AutoscalingPolicyServiceClient
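
A sketch of using the raw stub over a gRPC connection. Only TLS is set up here; per-RPC OAuth credentials are omitted, so a real call additionally needs application credentials (the wrapper client in cloud.google.com/go/dataproc/apiv1 handles all of this automatically). The regional endpoint and resource name are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	ctx := context.Background()

	// TLS to a regional Dataproc endpoint (placeholder region).
	conn, err := grpc.Dial(
		"us-central1-dataproc.googleapis.com:443",
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := dataprocpb.NewAutoscalingPolicyServiceClient(conn)
	policy, err := client.GetAutoscalingPolicy(ctx, &dataprocpb.GetAutoscalingPolicyRequest{
		Name: "projects/my-project/regions/us-central1/autoscalingPolicies/my-policy",
	})
	if err != nil {
		log.Fatal(err) // fails without per-RPC OAuth credentials
	}
	fmt.Println(policy.GetId(), policy.GetName())
}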

AutoscalingPolicyServiceServer

type AutoscalingPolicyServiceServer interface {
	// Creates new autoscaling policy.
	CreateAutoscalingPolicy(context.Context, *CreateAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
	// Updates (replaces) autoscaling policy.
	//
	// Disabled check for update_mask, because all updates will be full
	// replacements.
	UpdateAutoscalingPolicy(context.Context, *UpdateAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
	// Retrieves autoscaling policy.
	GetAutoscalingPolicy(context.Context, *GetAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
	// Lists autoscaling policies in the project.
	ListAutoscalingPolicies(context.Context, *ListAutoscalingPoliciesRequest) (*ListAutoscalingPoliciesResponse, error)
	// Deletes an autoscaling policy. It is an error to delete an autoscaling
	// policy that is in use by one or more clusters.
	DeleteAutoscalingPolicy(context.Context, *DeleteAutoscalingPolicyRequest) (*emptypb.Empty, error)
}

AutoscalingPolicyServiceServer is the server API for AutoscalingPolicyService service.

AutoscalingPolicy_BasicAlgorithm

type AutoscalingPolicy_BasicAlgorithm struct {
	BasicAlgorithm *BasicAutoscalingAlgorithm `protobuf:"bytes,3,opt,name=basic_algorithm,json=basicAlgorithm,proto3,oneof"`
}

AuxiliaryNodeGroup

type AuxiliaryNodeGroup struct {

	// Required. Node group configuration.
	NodeGroup *NodeGroup `protobuf:"bytes,1,opt,name=node_group,json=nodeGroup,proto3" json:"node_group,omitempty"`
	// Optional. A node group ID. Generated if not specified.
	//
	// The ID must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). Cannot begin or end with underscore
	// or hyphen. Must consist of from 3 to 33 characters.
	NodeGroupId string `protobuf:"bytes,2,opt,name=node_group_id,json=nodeGroupId,proto3" json:"node_group_id,omitempty"`
	// contains filtered or unexported fields
}

Node group identification and configuration information.

func (*AuxiliaryNodeGroup) Descriptor

func (*AuxiliaryNodeGroup) Descriptor() ([]byte, []int)

Deprecated: Use AuxiliaryNodeGroup.ProtoReflect.Descriptor instead.

func (*AuxiliaryNodeGroup) GetNodeGroup

func (x *AuxiliaryNodeGroup) GetNodeGroup() *NodeGroup

func (*AuxiliaryNodeGroup) GetNodeGroupId

func (x *AuxiliaryNodeGroup) GetNodeGroupId() string

func (*AuxiliaryNodeGroup) ProtoMessage

func (*AuxiliaryNodeGroup) ProtoMessage()

func (*AuxiliaryNodeGroup) ProtoReflect

func (x *AuxiliaryNodeGroup) ProtoReflect() protoreflect.Message

func (*AuxiliaryNodeGroup) Reset

func (x *AuxiliaryNodeGroup) Reset()

func (*AuxiliaryNodeGroup) String

func (x *AuxiliaryNodeGroup) String() string

AuxiliaryServicesConfig

type AuxiliaryServicesConfig struct {
	MetastoreConfig *MetastoreConfig `protobuf:"bytes,1,opt,name=metastore_config,json=metastoreConfig,proto3" json:"metastore_config,omitempty"`

	SparkHistoryServerConfig *SparkHistoryServerConfig "" /* 137 byte string literal not displayed */

}

Auxiliary services configuration for a Cluster.

func (*AuxiliaryServicesConfig) Descriptor

func (*AuxiliaryServicesConfig) Descriptor() ([]byte, []int)

Deprecated: Use AuxiliaryServicesConfig.ProtoReflect.Descriptor instead.

func (*AuxiliaryServicesConfig) GetMetastoreConfig

func (x *AuxiliaryServicesConfig) GetMetastoreConfig() *MetastoreConfig

func (*AuxiliaryServicesConfig) GetSparkHistoryServerConfig

func (x *AuxiliaryServicesConfig) GetSparkHistoryServerConfig() *SparkHistoryServerConfig

func (*AuxiliaryServicesConfig) ProtoMessage

func (*AuxiliaryServicesConfig) ProtoMessage()

func (*AuxiliaryServicesConfig) ProtoReflect

func (x *AuxiliaryServicesConfig) ProtoReflect() protoreflect.Message

func (*AuxiliaryServicesConfig) Reset

func (x *AuxiliaryServicesConfig) Reset()

func (*AuxiliaryServicesConfig) String

func (x *AuxiliaryServicesConfig) String() string

BasicAutoscalingAlgorithm

type BasicAutoscalingAlgorithm struct {

	// Types that are assignable to Config:
	//
	//	*BasicAutoscalingAlgorithm_YarnConfig
	Config isBasicAutoscalingAlgorithm_Config `protobuf_oneof:"config"`
	// Optional. Duration between scaling events. A scaling period starts after
	// the update operation from the previous event has completed.
	//
	// Bounds: [2m, 1d]. Default: 2m.
	CooldownPeriod *durationpb.Duration `protobuf:"bytes,2,opt,name=cooldown_period,json=cooldownPeriod,proto3" json:"cooldown_period,omitempty"`
	// contains filtered or unexported fields
}

Basic algorithm for autoscaling.

func (*BasicAutoscalingAlgorithm) Descriptor

func (*BasicAutoscalingAlgorithm) Descriptor() ([]byte, []int)

Deprecated: Use BasicAutoscalingAlgorithm.ProtoReflect.Descriptor instead.

func (*BasicAutoscalingAlgorithm) GetConfig

func (m *BasicAutoscalingAlgorithm) GetConfig() isBasicAutoscalingAlgorithm_Config

func (*BasicAutoscalingAlgorithm) GetCooldownPeriod

func (x *BasicAutoscalingAlgorithm) GetCooldownPeriod() *durationpb.Duration

func (*BasicAutoscalingAlgorithm) GetYarnConfig

func (*BasicAutoscalingAlgorithm) ProtoMessage

func (*BasicAutoscalingAlgorithm) ProtoMessage()

func (*BasicAutoscalingAlgorithm) ProtoReflect

func (*BasicAutoscalingAlgorithm) Reset

func (x *BasicAutoscalingAlgorithm) Reset()

func (*BasicAutoscalingAlgorithm) String

func (x *BasicAutoscalingAlgorithm) String() string

BasicAutoscalingAlgorithm_YarnConfig

type BasicAutoscalingAlgorithm_YarnConfig struct {
	// Required. YARN autoscaling configuration.
	YarnConfig *BasicYarnAutoscalingConfig `protobuf:"bytes,1,opt,name=yarn_config,json=yarnConfig,proto3,oneof"`
}

BasicYarnAutoscalingConfig

type BasicYarnAutoscalingConfig struct {
	GracefulDecommissionTimeout *durationpb.Duration "" /* 144 byte string literal not displayed */

	ScaleUpFactor float64 `protobuf:"fixed64,1,opt,name=scale_up_factor,json=scaleUpFactor,proto3" json:"scale_up_factor,omitempty"`

	ScaleDownFactor float64 `protobuf:"fixed64,2,opt,name=scale_down_factor,json=scaleDownFactor,proto3" json:"scale_down_factor,omitempty"`

	ScaleUpMinWorkerFraction float64 "" /* 141 byte string literal not displayed */

	ScaleDownMinWorkerFraction float64 "" /* 147 byte string literal not displayed */

}

Basic autoscaling configurations for YARN.
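
Putting the pieces above together, a sketch of an AutoscalingPolicy that uses the basic YARN algorithm; the oneof algorithm field is populated through the generated wrapper types, and the numbers are illustrative rather than recommended values.

package example

import (
	"time"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

// basicYarnPolicy builds a policy whose oneof algorithm field is set via
// AutoscalingPolicy_BasicAlgorithm and BasicAutoscalingAlgorithm_YarnConfig.
func basicYarnPolicy() *dataprocpb.AutoscalingPolicy {
	return &dataprocpb.AutoscalingPolicy{
		Id: "example-policy",
		WorkerConfig: &dataprocpb.InstanceGroupAutoscalingPolicyConfig{
			MinInstances: 2,
			MaxInstances: 10,
		},
		Algorithm: &dataprocpb.AutoscalingPolicy_BasicAlgorithm{
			BasicAlgorithm: &dataprocpb.BasicAutoscalingAlgorithm{
				CooldownPeriod: durationpb.New(2 * time.Minute),
				Config: &dataprocpb.BasicAutoscalingAlgorithm_YarnConfig{
					YarnConfig: &dataprocpb.BasicYarnAutoscalingConfig{
						GracefulDecommissionTimeout: durationpb.New(time.Hour),
						ScaleUpFactor:               0.05,
						ScaleDownFactor:             1.0,
					},
				},
			},
		},
	}
}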

func (*BasicYarnAutoscalingConfig) Descriptor

func (*BasicYarnAutoscalingConfig) Descriptor() ([]byte, []int)

Deprecated: Use BasicYarnAutoscalingConfig.ProtoReflect.Descriptor instead.

func (*BasicYarnAutoscalingConfig) GetGracefulDecommissionTimeout

func (x *BasicYarnAutoscalingConfig) GetGracefulDecommissionTimeout() *durationpb.Duration

func (*BasicYarnAutoscalingConfig) GetScaleDownFactor

func (x *BasicYarnAutoscalingConfig) GetScaleDownFactor() float64

func (*BasicYarnAutoscalingConfig) GetScaleDownMinWorkerFraction

func (x *BasicYarnAutoscalingConfig) GetScaleDownMinWorkerFraction() float64

func (*BasicYarnAutoscalingConfig) GetScaleUpFactor

func (x *BasicYarnAutoscalingConfig) GetScaleUpFactor() float64

func (*BasicYarnAutoscalingConfig) GetScaleUpMinWorkerFraction

func (x *BasicYarnAutoscalingConfig) GetScaleUpMinWorkerFraction() float64

func (*BasicYarnAutoscalingConfig) ProtoMessage

func (*BasicYarnAutoscalingConfig) ProtoMessage()

func (*BasicYarnAutoscalingConfig) ProtoReflect

func (*BasicYarnAutoscalingConfig) Reset

func (x *BasicYarnAutoscalingConfig) Reset()

func (*BasicYarnAutoscalingConfig) String

func (x *BasicYarnAutoscalingConfig) String() string

Batch

type Batch struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`

	Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"`

	CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`

	BatchConfig isBatch_BatchConfig `protobuf_oneof:"batch_config"`

	RuntimeInfo *RuntimeInfo `protobuf:"bytes,8,opt,name=runtime_info,json=runtimeInfo,proto3" json:"runtime_info,omitempty"`

	State Batch_State `protobuf:"varint,9,opt,name=state,proto3,enum=google.cloud.dataproc.v1.Batch_State" json:"state,omitempty"`

	StateMessage string `protobuf:"bytes,10,opt,name=state_message,json=stateMessage,proto3" json:"state_message,omitempty"`

	StateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=state_time,json=stateTime,proto3" json:"state_time,omitempty"`

	Creator string `protobuf:"bytes,12,opt,name=creator,proto3" json:"creator,omitempty"`

	Labels map[string]string "" /* 154 byte string literal not displayed */

	RuntimeConfig *RuntimeConfig `protobuf:"bytes,14,opt,name=runtime_config,json=runtimeConfig,proto3" json:"runtime_config,omitempty"`

	EnvironmentConfig *EnvironmentConfig `protobuf:"bytes,15,opt,name=environment_config,json=environmentConfig,proto3" json:"environment_config,omitempty"`

	Operation string `protobuf:"bytes,16,opt,name=operation,proto3" json:"operation,omitempty"`

	StateHistory []*Batch_StateHistory `protobuf:"bytes,17,rep,name=state_history,json=stateHistory,proto3" json:"state_history,omitempty"`

}

A representation of a batch workload in the service.
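
The getters are nil-safe, and the batch_config oneof is read through the typed accessors, which return nil when a different config kind is set. A small sketch of inspecting a Batch received from the API:

package example

import (
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

// describeBatch prints the interesting parts of a Batch; b may come from
// GetBatch or from a ListBatches page.
func describeBatch(b *dataprocpb.Batch) {
	fmt.Printf("%s (%s): %s\n", b.GetName(), b.GetUuid(), b.GetState())

	// At most one of the batch_config accessors returns non-nil.
	switch {
	case b.GetPysparkBatch() != nil:
		fmt.Println("pyspark main file:", b.GetPysparkBatch().GetMainPythonFileUri())
	case b.GetSparkBatch() != nil:
		fmt.Println("spark batch")
	case b.GetSparkRBatch() != nil:
		fmt.Println("sparkR batch")
	case b.GetSparkSqlBatch() != nil:
		fmt.Println("spark SQL batch")
	}

	for _, h := range b.GetStateHistory() {
		fmt.Printf("  %s at %s: %s\n", h.GetState(), h.GetStateStartTime().AsTime(), h.GetStateMessage())
	}
}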

func (*Batch) Descriptor

func (*Batch) Descriptor() ([]byte, []int)

Deprecated: Use Batch.ProtoReflect.Descriptor instead.

func (*Batch) GetBatchConfig

func (m *Batch) GetBatchConfig() isBatch_BatchConfig

func (*Batch) GetCreateTime

func (x *Batch) GetCreateTime() *timestamppb.Timestamp

func (*Batch) GetCreator

func (x *Batch) GetCreator() string

func (*Batch) GetEnvironmentConfig

func (x *Batch) GetEnvironmentConfig() *EnvironmentConfig

func (*Batch) GetLabels

func (x *Batch) GetLabels() map[string]string

func (*Batch) GetName

func (x *Batch) GetName() string

func (*Batch) GetOperation

func (x *Batch) GetOperation() string

func (*Batch) GetPysparkBatch

func (x *Batch) GetPysparkBatch() *PySparkBatch

func (*Batch) GetRuntimeConfig

func (x *Batch) GetRuntimeConfig() *RuntimeConfig

func (*Batch) GetRuntimeInfo

func (x *Batch) GetRuntimeInfo() *RuntimeInfo

func (*Batch) GetSparkBatch

func (x *Batch) GetSparkBatch() *SparkBatch

func (*Batch) GetSparkRBatch

func (x *Batch) GetSparkRBatch() *SparkRBatch

func (*Batch) GetSparkSqlBatch

func (x *Batch) GetSparkSqlBatch() *SparkSqlBatch

func (*Batch) GetState

func (x *Batch) GetState() Batch_State

func (*Batch) GetStateHistory

func (x *Batch) GetStateHistory() []*Batch_StateHistory

func (*Batch) GetStateMessage

func (x *Batch) GetStateMessage() string

func (*Batch) GetStateTime

func (x *Batch) GetStateTime() *timestamppb.Timestamp

func (*Batch) GetUuid

func (x *Batch) GetUuid() string

func (*Batch) ProtoMessage

func (*Batch) ProtoMessage()

func (*Batch) ProtoReflect

func (x *Batch) ProtoReflect() protoreflect.Message

func (*Batch) Reset

func (x *Batch) Reset()

func (*Batch) String

func (x *Batch) String() string

BatchControllerClient

type BatchControllerClient interface {
	// Creates a batch workload that executes asynchronously.
	CreateBatch(ctx context.Context, in *CreateBatchRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Gets the batch workload resource representation.
	GetBatch(ctx context.Context, in *GetBatchRequest, opts ...grpc.CallOption) (*Batch, error)
	// Lists batch workloads.
	ListBatches(ctx context.Context, in *ListBatchesRequest, opts ...grpc.CallOption) (*ListBatchesResponse, error)
	// Deletes the batch workload resource. If the batch is not in terminal state,
	// the delete fails and the response returns `FAILED_PRECONDITION`.
	DeleteBatch(ctx context.Context, in *DeleteBatchRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}

BatchControllerClient is the client API for BatchController service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewBatchControllerClient

func NewBatchControllerClient(cc grpc.ClientConnInterface) BatchControllerClient
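
A sketch of paging through batch workloads with the raw stub; conn is an authenticated connection to a regional endpoint (see the AutoscalingPolicyServiceClient sketch earlier), and the parent value is a placeholder.

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/grpc"
)

// listBatches pages through all batch workloads under a parent such as
// "projects/my-project/locations/us-central1" (placeholder).
func listBatches(ctx context.Context, conn grpc.ClientConnInterface, parent string) error {
	client := dataprocpb.NewBatchControllerClient(conn)
	pageToken := ""
	for {
		resp, err := client.ListBatches(ctx, &dataprocpb.ListBatchesRequest{
			Parent:    parent,
			PageSize:  50,
			PageToken: pageToken,
		})
		if err != nil {
			return err
		}
		for _, b := range resp.GetBatches() {
			fmt.Println(b.GetName(), b.GetState())
		}
		pageToken = resp.GetNextPageToken()
		if pageToken == "" {
			return nil
		}
	}
}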

BatchControllerServer

type BatchControllerServer interface {
	// Creates a batch workload that executes asynchronously.
	CreateBatch(context.Context, *CreateBatchRequest) (*longrunning.Operation, error)
	// Gets the batch workload resource representation.
	GetBatch(context.Context, *GetBatchRequest) (*Batch, error)
	// Lists batch workloads.
	ListBatches(context.Context, *ListBatchesRequest) (*ListBatchesResponse, error)
	// Deletes the batch workload resource. If the batch is not in terminal state,
	// the delete fails and the response returns `FAILED_PRECONDITION`.
	DeleteBatch(context.Context, *DeleteBatchRequest) (*emptypb.Empty, error)
}

BatchControllerServer is the server API for BatchController service.

BatchOperationMetadata

type BatchOperationMetadata struct {
	Batch string `protobuf:"bytes,1,opt,name=batch,proto3" json:"batch,omitempty"`

	BatchUuid string `protobuf:"bytes,2,opt,name=batch_uuid,json=batchUuid,proto3" json:"batch_uuid,omitempty"`

	CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`

	DoneTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=done_time,json=doneTime,proto3" json:"done_time,omitempty"`

	OperationType BatchOperationMetadata_BatchOperationType "" /* 173 byte string literal not displayed */

	Description string `protobuf:"bytes,7,opt,name=description,proto3" json:"description,omitempty"`

	Labels map[string]string "" /* 153 byte string literal not displayed */

	Warnings []string `protobuf:"bytes,9,rep,name=warnings,proto3" json:"warnings,omitempty"`

}

Metadata describing the Batch operation.

func (*BatchOperationMetadata) Descriptor

func (*BatchOperationMetadata) Descriptor() ([]byte, []int)

Deprecated: Use BatchOperationMetadata.ProtoReflect.Descriptor instead.

func (*BatchOperationMetadata) GetBatch

func (x *BatchOperationMetadata) GetBatch() string

func (*BatchOperationMetadata) GetBatchUuid

func (x *BatchOperationMetadata) GetBatchUuid() string

func (*BatchOperationMetadata) GetCreateTime

func (x *BatchOperationMetadata) GetCreateTime() *timestamppb.Timestamp

func (*BatchOperationMetadata) GetDescription

func (x *BatchOperationMetadata) GetDescription() string

func (*BatchOperationMetadata) GetDoneTime

func (x *BatchOperationMetadata) GetDoneTime() *timestamppb.Timestamp

func (*BatchOperationMetadata) GetLabels

func (x *BatchOperationMetadata) GetLabels() map[string]string

func (*BatchOperationMetadata) GetOperationType

func (*BatchOperationMetadata) GetWarnings

func (x *BatchOperationMetadata) GetWarnings() []string

func (*BatchOperationMetadata) ProtoMessage

func (*BatchOperationMetadata) ProtoMessage()

func (*BatchOperationMetadata) ProtoReflect

func (x *BatchOperationMetadata) ProtoReflect() protoreflect.Message

func (*BatchOperationMetadata) Reset

func (x *BatchOperationMetadata) Reset()

func (*BatchOperationMetadata) String

func (x *BatchOperationMetadata) String() string

BatchOperationMetadata_BatchOperationType

type BatchOperationMetadata_BatchOperationType int32

Operation type for Batch resources.

BatchOperationMetadata_BATCH_OPERATION_TYPE_UNSPECIFIED, BatchOperationMetadata_BATCH

const (
	// Batch operation type is unknown.
	BatchOperationMetadata_BATCH_OPERATION_TYPE_UNSPECIFIED BatchOperationMetadata_BatchOperationType = 0
	// Batch operation type.
	BatchOperationMetadata_BATCH BatchOperationMetadata_BatchOperationType = 1
)

func (BatchOperationMetadata_BatchOperationType) Descriptor

func (BatchOperationMetadata_BatchOperationType) Enum

func (BatchOperationMetadata_BatchOperationType) EnumDescriptor

func (BatchOperationMetadata_BatchOperationType) EnumDescriptor() ([]byte, []int)

Deprecated: Use BatchOperationMetadata_BatchOperationType.Descriptor instead.

func (BatchOperationMetadata_BatchOperationType) Number

func (BatchOperationMetadata_BatchOperationType) String

func (BatchOperationMetadata_BatchOperationType) Type

Batch_PysparkBatch

type Batch_PysparkBatch struct {
	// Optional. PySpark batch config.
	PysparkBatch *PySparkBatch `protobuf:"bytes,4,opt,name=pyspark_batch,json=pysparkBatch,proto3,oneof"`
}

Batch_SparkBatch

type Batch_SparkBatch struct {
	// Optional. Spark batch config.
	SparkBatch *SparkBatch `protobuf:"bytes,5,opt,name=spark_batch,json=sparkBatch,proto3,oneof"`
}

Batch_SparkRBatch

type Batch_SparkRBatch struct {
	// Optional. SparkR batch config.
	SparkRBatch *SparkRBatch `protobuf:"bytes,6,opt,name=spark_r_batch,json=sparkRBatch,proto3,oneof"`
}

Batch_SparkSqlBatch

type Batch_SparkSqlBatch struct {
	// Optional. SparkSql batch config.
	SparkSqlBatch *SparkSqlBatch `protobuf:"bytes,7,opt,name=spark_sql_batch,json=sparkSqlBatch,proto3,oneof"`
}

Batch_State

type Batch_State int32

The batch state.

Batch_STATE_UNSPECIFIED, Batch_PENDING, Batch_RUNNING, Batch_CANCELLING, Batch_CANCELLED, Batch_SUCCEEDED, Batch_FAILED

const (
	// The batch state is unknown.
	Batch_STATE_UNSPECIFIED Batch_State = 0
	// The batch is created before running.
	Batch_PENDING Batch_State = 1
	// The batch is running.
	Batch_RUNNING Batch_State = 2
	// The batch is cancelling.
	Batch_CANCELLING Batch_State = 3
	// The batch cancellation was successful.
	Batch_CANCELLED Batch_State = 4
	// The batch completed successfully.
	Batch_SUCCEEDED Batch_State = 5
	// The batch is no longer running due to an error.
	Batch_FAILED Batch_State = 6
)
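
For example, a helper that uses these constants to decide whether a batch has reached a terminal state, a useful check before calling DeleteBatch, which fails with FAILED_PRECONDITION for non-terminal batches:

package example

import "cloud.google.com/go/dataproc/apiv1/dataprocpb"

// isTerminal reports whether a batch in state s will not change state again.
func isTerminal(s dataprocpb.Batch_State) bool {
	switch s {
	case dataprocpb.Batch_CANCELLED, dataprocpb.Batch_SUCCEEDED, dataprocpb.Batch_FAILED:
		return true
	default:
		return false
	}
}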

func (Batch_State) Descriptor

func (Batch_State) Enum

func (x Batch_State) Enum() *Batch_State

func (Batch_State) EnumDescriptor

func (Batch_State) EnumDescriptor() ([]byte, []int)

Deprecated: Use Batch_State.Descriptor instead.

func (Batch_State) Number

func (x Batch_State) Number() protoreflect.EnumNumber

func (Batch_State) String

func (x Batch_State) String() string

func (Batch_State) Type

Batch_StateHistory

type Batch_StateHistory struct {

	// Output only. The state of the batch at this point in history.
	State Batch_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.Batch_State" json:"state,omitempty"`
	// Output only. Details about the state at this point in history.
	StateMessage string `protobuf:"bytes,2,opt,name=state_message,json=stateMessage,proto3" json:"state_message,omitempty"`
	// Output only. The time when the batch entered the historical state.
	StateStartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// contains filtered or unexported fields
}

Historical state information.

func (*Batch_StateHistory) Descriptor

func (*Batch_StateHistory) Descriptor() ([]byte, []int)

Deprecated: Use Batch_StateHistory.ProtoReflect.Descriptor instead.

func (*Batch_StateHistory) GetState

func (x *Batch_StateHistory) GetState() Batch_State

func (*Batch_StateHistory) GetStateMessage

func (x *Batch_StateHistory) GetStateMessage() string

func (*Batch_StateHistory) GetStateStartTime

func (x *Batch_StateHistory) GetStateStartTime() *timestamppb.Timestamp

func (*Batch_StateHistory) ProtoMessage

func (*Batch_StateHistory) ProtoMessage()

func (*Batch_StateHistory) ProtoReflect

func (x *Batch_StateHistory) ProtoReflect() protoreflect.Message

func (*Batch_StateHistory) Reset

func (x *Batch_StateHistory) Reset()

func (*Batch_StateHistory) String

func (x *Batch_StateHistory) String() string

CancelJobRequest

type CancelJobRequest struct {

	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// contains filtered or unexported fields
}

A request to cancel a job.
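
A sketch of issuing the cancel through the JobControllerClient stub (defined elsewhere in this package); conn is an authenticated connection, and the project, region, and job ID are placeholders.

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/grpc"
)

// cancelJob asks Dataproc to cancel a running job (placeholder IDs).
func cancelJob(ctx context.Context, conn grpc.ClientConnInterface) error {
	client := dataprocpb.NewJobControllerClient(conn)
	job, err := client.CancelJob(ctx, &dataprocpb.CancelJobRequest{
		ProjectId: "my-project",
		Region:    "us-central1",
		JobId:     "job-1234",
	})
	if err != nil {
		return err
	}
	fmt.Println("job status:", job.GetStatus().GetState())
	return nil
}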

func (*CancelJobRequest) Descriptor

func (*CancelJobRequest) Descriptor() ([]byte, []int)

Deprecated: Use CancelJobRequest.ProtoReflect.Descriptor instead.

func (*CancelJobRequest) GetJobId

func (x *CancelJobRequest) GetJobId() string

func (*CancelJobRequest) GetProjectId

func (x *CancelJobRequest) GetProjectId() string

func (*CancelJobRequest) GetRegion

func (x *CancelJobRequest) GetRegion() string

func (*CancelJobRequest) ProtoMessage

func (*CancelJobRequest) ProtoMessage()

func (*CancelJobRequest) ProtoReflect

func (x *CancelJobRequest) ProtoReflect() protoreflect.Message

func (*CancelJobRequest) Reset

func (x *CancelJobRequest) Reset()

func (*CancelJobRequest) String

func (x *CancelJobRequest) String() string

Cluster

type Cluster struct {
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`

	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`

	Config *ClusterConfig `protobuf:"bytes,3,opt,name=config,proto3" json:"config,omitempty"`

	VirtualClusterConfig *VirtualClusterConfig `protobuf:"bytes,10,opt,name=virtual_cluster_config,json=virtualClusterConfig,proto3" json:"virtual_cluster_config,omitempty"`

	Labels map[string]string "" /* 153 byte string literal not displayed */

	Status *ClusterStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`

	StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`

	ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`

	Metrics *ClusterMetrics `protobuf:"bytes,9,opt,name=metrics,proto3" json:"metrics,omitempty"`

}

Describes the identifying information, config, and status of a Dataproc cluster.

func (*Cluster) Descriptor

func (*Cluster) Descriptor() ([]byte, []int)

Deprecated: Use Cluster.ProtoReflect.Descriptor instead.

func (*Cluster) GetClusterName

func (x *Cluster) GetClusterName() string

func (*Cluster) GetClusterUuid

func (x *Cluster) GetClusterUuid() string

func (*Cluster) GetConfig

func (x *Cluster) GetConfig() *ClusterConfig

func (*Cluster) GetLabels

func (x *Cluster) GetLabels() map[string]string

func (*Cluster) GetMetrics

func (x *Cluster) GetMetrics() *ClusterMetrics

func (*Cluster) GetProjectId

func (x *Cluster) GetProjectId() string

func (*Cluster) GetStatus

func (x *Cluster) GetStatus() *ClusterStatus

func (*Cluster) GetStatusHistory

func (x *Cluster) GetStatusHistory() []*ClusterStatus

func (*Cluster) GetVirtualClusterConfig

func (x *Cluster) GetVirtualClusterConfig() *VirtualClusterConfig

func (*Cluster) ProtoMessage

func (*Cluster) ProtoMessage()

func (*Cluster) ProtoReflect

func (x *Cluster) ProtoReflect() protoreflect.Message

func (*Cluster) Reset

func (x *Cluster) Reset()

func (*Cluster) String

func (x *Cluster) String() string

ClusterConfig

type ClusterConfig struct {
	ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket,proto3" json:"config_bucket,omitempty"`

	TempBucket string `protobuf:"bytes,2,opt,name=temp_bucket,json=tempBucket,proto3" json:"temp_bucket,omitempty"`

	GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig,proto3" json:"gce_cluster_config,omitempty"`

	MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig,proto3" json:"master_config,omitempty"`

	WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig,proto3" json:"worker_config,omitempty"`

	SecondaryWorkerConfig *InstanceGroupConfig "" /* 127 byte string literal not displayed */

	SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig,proto3" json:"software_config,omitempty"`

	InitializationActions []*NodeInitializationAction `protobuf:"bytes,11,rep,name=initialization_actions,json=initializationActions,proto3" json:"initialization_actions,omitempty"`

	EncryptionConfig *EncryptionConfig `protobuf:"bytes,15,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`

	AutoscalingConfig *AutoscalingConfig `protobuf:"bytes,18,opt,name=autoscaling_config,json=autoscalingConfig,proto3" json:"autoscaling_config,omitempty"`

	SecurityConfig *SecurityConfig `protobuf:"bytes,16,opt,name=security_config,json=securityConfig,proto3" json:"security_config,omitempty"`

	LifecycleConfig *LifecycleConfig `protobuf:"bytes,17,opt,name=lifecycle_config,json=lifecycleConfig,proto3" json:"lifecycle_config,omitempty"`

	EndpointConfig *EndpointConfig `protobuf:"bytes,19,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"`

	MetastoreConfig *MetastoreConfig `protobuf:"bytes,20,opt,name=metastore_config,json=metastoreConfig,proto3" json:"metastore_config,omitempty"`

	DataprocMetricConfig *DataprocMetricConfig `protobuf:"bytes,23,opt,name=dataproc_metric_config,json=dataprocMetricConfig,proto3" json:"dataproc_metric_config,omitempty"`

	AuxiliaryNodeGroups []*AuxiliaryNodeGroup `protobuf:"bytes,25,rep,name=auxiliary_node_groups,json=auxiliaryNodeGroups,proto3" json:"auxiliary_node_groups,omitempty"`

}

The cluster config.
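
A minimal config sketch with one master and two workers; the zone, machine types, and image version are illustrative placeholders:

package example

import "cloud.google.com/go/dataproc/apiv1/dataprocpb"

// smallCluster returns a minimal config: one master, two workers,
// with placeholder zone, machine types, and image version.
func smallCluster() *dataprocpb.ClusterConfig {
	return &dataprocpb.ClusterConfig{
		GceClusterConfig: &dataprocpb.GceClusterConfig{
			ZoneUri: "us-central1-a",
		},
		MasterConfig: &dataprocpb.InstanceGroupConfig{
			NumInstances:   1,
			MachineTypeUri: "n1-standard-4",
		},
		WorkerConfig: &dataprocpb.InstanceGroupConfig{
			NumInstances:   2,
			MachineTypeUri: "n1-standard-4",
		},
		SoftwareConfig: &dataprocpb.SoftwareConfig{
			ImageVersion: "2.1-debian11",
		},
	}
}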

func (*ClusterConfig) Descriptor

func (*ClusterConfig) Descriptor() ([]byte, []int)

Deprecated: Use ClusterConfig.ProtoReflect.Descriptor instead.

func (*ClusterConfig) GetAutoscalingConfig

func (x *ClusterConfig) GetAutoscalingConfig() *AutoscalingConfig

func (*ClusterConfig) GetAuxiliaryNodeGroups

func (x *ClusterConfig) GetAuxiliaryNodeGroups() []*AuxiliaryNodeGroup

func (*ClusterConfig) GetConfigBucket

func (x *ClusterConfig) GetConfigBucket() string

func (*ClusterConfig) GetDataprocMetricConfig

func (x *ClusterConfig) GetDataprocMetricConfig() *DataprocMetricConfig

func (*ClusterConfig) GetEncryptionConfig

func (x *ClusterConfig) GetEncryptionConfig() *EncryptionConfig

func (*ClusterConfig) GetEndpointConfig

func (x *ClusterConfig) GetEndpointConfig() *EndpointConfig

func (*ClusterConfig) GetGceClusterConfig

func (x *ClusterConfig) GetGceClusterConfig() *GceClusterConfig

func (*ClusterConfig) GetInitializationActions

func (x *ClusterConfig) GetInitializationActions() []*NodeInitializationAction

func (*ClusterConfig) GetLifecycleConfig

func (x *ClusterConfig) GetLifecycleConfig() *LifecycleConfig

func (*ClusterConfig) GetMasterConfig

func (x *ClusterConfig) GetMasterConfig() *InstanceGroupConfig

func (*ClusterConfig) GetMetastoreConfig

func (x *ClusterConfig) GetMetastoreConfig() *MetastoreConfig

func (*ClusterConfig) GetSecondaryWorkerConfig

func (x *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig

func (*ClusterConfig) GetSecurityConfig

func (x *ClusterConfig) GetSecurityConfig() *SecurityConfig

func (*ClusterConfig) GetSoftwareConfig

func (x *ClusterConfig) GetSoftwareConfig() *SoftwareConfig

func (*ClusterConfig) GetTempBucket

func (x *ClusterConfig) GetTempBucket() string

func (*ClusterConfig) GetWorkerConfig

func (x *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig

func (*ClusterConfig) ProtoMessage

func (*ClusterConfig) ProtoMessage()

func (*ClusterConfig) ProtoReflect

func (x *ClusterConfig) ProtoReflect() protoreflect.Message

func (*ClusterConfig) Reset

func (x *ClusterConfig) Reset()

func (*ClusterConfig) String

func (x *ClusterConfig) String() string

ClusterControllerClient

type ClusterControllerClient interface {
	// Creates a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Updates a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	// The cluster must be in a
	// [`RUNNING`][google.cloud.dataproc.v1.ClusterStatus.State] state or an error
	// is returned.
	UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Stops a cluster in a project.
	StopCluster(ctx context.Context, in *StopClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Starts a cluster in a project.
	StartCluster(ctx context.Context, in *StartClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Deletes a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Gets the resource representation for a cluster in a project.
	GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error)
	// Lists all regions/{region}/clusters in a project alphabetically.
	ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
	// Gets cluster diagnostic information. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	// After the operation completes,
	// [Operation.response][google.longrunning.Operation.response]
	// contains
	// [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
	DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}

ClusterControllerClient is the client API for ClusterController service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewClusterControllerClient

func NewClusterControllerClient(cc grpc.ClientConnInterface) ClusterControllerClient
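
A sketch of reading a cluster's status with the raw stub; CreateCluster and the other mutating RPCs return long-running operations that must be polled separately. Here conn is an authenticated connection and the identifiers are placeholders.

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/grpc"
)

// clusterState fetches a cluster and reports its current state (placeholder IDs).
func clusterState(ctx context.Context, conn grpc.ClientConnInterface) (dataprocpb.ClusterStatus_State, error) {
	client := dataprocpb.NewClusterControllerClient(conn)
	c, err := client.GetCluster(ctx, &dataprocpb.GetClusterRequest{
		ProjectId:   "my-project",
		Region:      "us-central1",
		ClusterName: "my-cluster",
	})
	if err != nil {
		return dataprocpb.ClusterStatus_UNKNOWN, err
	}
	fmt.Println(c.GetClusterName(), c.GetClusterUuid(), c.GetStatus().GetState())
	return c.GetStatus().GetState(), nil
}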

ClusterControllerServer

type ClusterControllerServer interface {
	// Creates a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	CreateCluster(context.Context, *CreateClusterRequest) (*longrunning.Operation, error)
	// Updates a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	// The cluster must be in a
	// [`RUNNING`][google.cloud.dataproc.v1.ClusterStatus.State] state or an error
	// is returned.
	UpdateCluster(context.Context, *UpdateClusterRequest) (*longrunning.Operation, error)
	// Stops a cluster in a project.
	StopCluster(context.Context, *StopClusterRequest) (*longrunning.Operation, error)
	// Starts a cluster in a project.
	StartCluster(context.Context, *StartClusterRequest) (*longrunning.Operation, error)
	// Deletes a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	DeleteCluster(context.Context, *DeleteClusterRequest) (*longrunning.Operation, error)
	// Gets the resource representation for a cluster in a project.
	GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
	// Lists all regions/{region}/clusters in a project alphabetically.
	ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
	// Gets cluster diagnostic information. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	// After the operation completes,
	// [Operation.response][google.longrunning.Operation.response]
	// contains
	// [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
	DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*longrunning.Operation, error)
}

ClusterControllerServer is the server API for ClusterController service.

ClusterMetrics

type ClusterMetrics struct {
	HdfsMetrics map[string]int64 "" /* 183 byte string literal not displayed */

	YarnMetrics map[string]int64 "" /* 183 byte string literal not displayed */

}

Contains cluster daemon metrics, such as HDFS and YARN stats.

Beta Feature: This report is available for testing purposes only. It may be changed before final release.

func (*ClusterMetrics) Descriptor

func (*ClusterMetrics) Descriptor() ([]byte, []int)

Deprecated: Use ClusterMetrics.ProtoReflect.Descriptor instead.

func (*ClusterMetrics) GetHdfsMetrics

func (x *ClusterMetrics) GetHdfsMetrics() map[string]int64

func (*ClusterMetrics) GetYarnMetrics

func (x *ClusterMetrics) GetYarnMetrics() map[string]int64

func (*ClusterMetrics) ProtoMessage

func (*ClusterMetrics) ProtoMessage()

func (*ClusterMetrics) ProtoReflect

func (x *ClusterMetrics) ProtoReflect() protoreflect.Message

func (*ClusterMetrics) Reset

func (x *ClusterMetrics) Reset()

func (*ClusterMetrics) String

func (x *ClusterMetrics) String() string

ClusterOperation

type ClusterOperation struct {

	// Output only. The id of the cluster operation.
	OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
	// Output only. Error, if operation failed.
	Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	// Output only. Indicates the operation is done.
	Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
	// contains filtered or unexported fields
}

The cluster operation triggered by a workflow.

func (*ClusterOperation) Descriptor

func (*ClusterOperation) Descriptor() ([]byte, []int)

Deprecated: Use ClusterOperation.ProtoReflect.Descriptor instead.

func (*ClusterOperation) GetDone

func (x *ClusterOperation) GetDone() bool

func (*ClusterOperation) GetError

func (x *ClusterOperation) GetError() string

func (*ClusterOperation) GetOperationId

func (x *ClusterOperation) GetOperationId() string

func (*ClusterOperation) ProtoMessage

func (*ClusterOperation) ProtoMessage()

func (*ClusterOperation) ProtoReflect

func (x *ClusterOperation) ProtoReflect() protoreflect.Message

func (*ClusterOperation) Reset

func (x *ClusterOperation) Reset()

func (*ClusterOperation) String

func (x *ClusterOperation) String() string

ClusterOperationMetadata

type ClusterOperationMetadata struct {
	ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`

	ClusterUuid string `protobuf:"bytes,8,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`

	Status *ClusterOperationStatus `protobuf:"bytes,9,opt,name=status,proto3" json:"status,omitempty"`

	StatusHistory []*ClusterOperationStatus `protobuf:"bytes,10,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`

	OperationType string `protobuf:"bytes,11,opt,name=operation_type,json=operationType,proto3" json:"operation_type,omitempty"`

	Description string `protobuf:"bytes,12,opt,name=description,proto3" json:"description,omitempty"`

	Labels map[string]string "" /* 154 byte string literal not displayed */

	Warnings []string `protobuf:"bytes,14,rep,name=warnings,proto3" json:"warnings,omitempty"`

}

Metadata describing the operation.

func (*ClusterOperationMetadata) Descriptor

func (*ClusterOperationMetadata) Descriptor() ([]byte, []int)

Deprecated: Use ClusterOperationMetadata.ProtoReflect.Descriptor instead.

func (*ClusterOperationMetadata) GetClusterName

func (x *ClusterOperationMetadata) GetClusterName() string

func (*ClusterOperationMetadata) GetClusterUuid

func (x *ClusterOperationMetadata) GetClusterUuid() string

func (*ClusterOperationMetadata) GetDescription

func (x *ClusterOperationMetadata) GetDescription() string

func (*ClusterOperationMetadata) GetLabels

func (x *ClusterOperationMetadata) GetLabels() map[string]string

func (*ClusterOperationMetadata) GetOperationType

func (x *ClusterOperationMetadata) GetOperationType() string

func (*ClusterOperationMetadata) GetStatus

func (*ClusterOperationMetadata) GetStatusHistory

func (x *ClusterOperationMetadata) GetStatusHistory() []*ClusterOperationStatus

func (*ClusterOperationMetadata) GetWarnings

func (x *ClusterOperationMetadata) GetWarnings() []string

func (*ClusterOperationMetadata) ProtoMessage

func (*ClusterOperationMetadata) ProtoMessage()

func (*ClusterOperationMetadata) ProtoReflect

func (x *ClusterOperationMetadata) ProtoReflect() protoreflect.Message

func (*ClusterOperationMetadata) Reset

func (x *ClusterOperationMetadata) Reset()

func (*ClusterOperationMetadata) String

func (x *ClusterOperationMetadata) String() string

ClusterOperationStatus

type ClusterOperationStatus struct {

	// Output only. A message containing the operation state.
	State ClusterOperationStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.ClusterOperationStatus_State" json:"state,omitempty"`
	// Output only. A message containing the detailed operation state.
	InnerState string `protobuf:"bytes,2,opt,name=inner_state,json=innerState,proto3" json:"inner_state,omitempty"`
	// Output only. A message containing any operation metadata details.
	Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"`
	// Output only. The time this state was entered.
	StateStartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// contains filtered or unexported fields
}

The status of the operation.

func (*ClusterOperationStatus) Descriptor

func (*ClusterOperationStatus) Descriptor() ([]byte, []int)

Deprecated: Use ClusterOperationStatus.ProtoReflect.Descriptor instead.

func (*ClusterOperationStatus) GetDetails

func (x *ClusterOperationStatus) GetDetails() string

func (*ClusterOperationStatus) GetInnerState

func (x *ClusterOperationStatus) GetInnerState() string

func (*ClusterOperationStatus) GetState

func (*ClusterOperationStatus) GetStateStartTime

func (x *ClusterOperationStatus) GetStateStartTime() *timestamppb.Timestamp

func (*ClusterOperationStatus) ProtoMessage

func (*ClusterOperationStatus) ProtoMessage()

func (*ClusterOperationStatus) ProtoReflect

func (x *ClusterOperationStatus) ProtoReflect() protoreflect.Message

func (*ClusterOperationStatus) Reset

func (x *ClusterOperationStatus) Reset()

func (*ClusterOperationStatus) String

func (x *ClusterOperationStatus) String() string

ClusterOperationStatus_State

type ClusterOperationStatus_State int32

The operation state.

ClusterOperationStatus_UNKNOWN, ClusterOperationStatus_PENDING, ClusterOperationStatus_RUNNING, ClusterOperationStatus_DONE

const (
	// Unused.
	ClusterOperationStatus_UNKNOWN ClusterOperationStatus_State = 0
	// The operation has been created.
	ClusterOperationStatus_PENDING ClusterOperationStatus_State = 1
	// The operation is running.
	ClusterOperationStatus_RUNNING ClusterOperationStatus_State = 2
	// The operation is done; either cancelled or completed.
	ClusterOperationStatus_DONE ClusterOperationStatus_State = 3
)

func (ClusterOperationStatus_State) Descriptor

func (ClusterOperationStatus_State) Enum

func (ClusterOperationStatus_State) EnumDescriptor

func (ClusterOperationStatus_State) EnumDescriptor() ([]byte, []int)

Deprecated: Use ClusterOperationStatus_State.Descriptor instead.

func (ClusterOperationStatus_State) Number

func (ClusterOperationStatus_State) String

func (ClusterOperationStatus_State) Type

ClusterSelector

type ClusterSelector struct {
	Zone string `protobuf:"bytes,1,opt,name=zone,proto3" json:"zone,omitempty"`

	ClusterLabels map[string]string "" /* 188 byte string literal not displayed */

}

A selector that chooses the target cluster for jobs based on metadata.
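A minimal sketch of building a selector that matches clusters by label; the label keys and values are placeholders:

package main

import (
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	// Match any cluster carrying both labels; Zone is optional and does not
	// affect which cluster is selected.
	sel := &dataprocpb.ClusterSelector{
		ClusterLabels: map[string]string{ // hypothetical labels
			"env":  "staging",
			"team": "data-eng",
		},
	}
	fmt.Println(sel.GetClusterLabels())
}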

func (*ClusterSelector) Descriptor

func (*ClusterSelector) Descriptor() ([]byte, []int)

Deprecated: Use ClusterSelector.ProtoReflect.Descriptor instead.

func (*ClusterSelector) GetClusterLabels

func (x *ClusterSelector) GetClusterLabels() map[string]string

func (*ClusterSelector) GetZone

func (x *ClusterSelector) GetZone() string

func (*ClusterSelector) ProtoMessage

func (*ClusterSelector) ProtoMessage()

func (*ClusterSelector) ProtoReflect

func (x *ClusterSelector) ProtoReflect() protoreflect.Message

func (*ClusterSelector) Reset

func (x *ClusterSelector) Reset()

func (*ClusterSelector) String

func (x *ClusterSelector) String() string

ClusterStatus

type ClusterStatus struct {

	// Output only. The cluster's state.
	State ClusterStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.ClusterStatus_State" json:"state,omitempty"`
	// Optional. Output only. Details of cluster's state.
	Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
	// Output only. Time when this state was entered (see JSON representation of
	// [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
	StateStartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// Output only. Additional state information that includes
	// status reported by the agent.
	Substate ClusterStatus_Substate `protobuf:"varint,4,opt,name=substate,proto3,enum=google.cloud.dataproc.v1.ClusterStatus_Substate" json:"substate,omitempty"`
	// contains filtered or unexported fields
}

The status of a cluster and its instances.
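For illustration, a small sketch that combines the state and the agent-reported substate to decide whether a cluster is usable; the helper and its policy are ours, not part of the API:

package main

import (
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

// usable reports whether a cluster is accepting jobs and is not flagged
// unhealthy by the agent. ERROR_DUE_TO_UPDATE still accepts jobs, so it is
// treated as usable here.
func usable(st *dataprocpb.ClusterStatus) bool {
	switch st.GetState() {
	case dataprocpb.ClusterStatus_RUNNING,
		dataprocpb.ClusterStatus_UPDATING,
		dataprocpb.ClusterStatus_ERROR_DUE_TO_UPDATE:
		return st.GetSubstate() != dataprocpb.ClusterStatus_UNHEALTHY
	default:
		return false
	}
}

func main() {
	st := &dataprocpb.ClusterStatus{
		State:    dataprocpb.ClusterStatus_RUNNING,
		Substate: dataprocpb.ClusterStatus_STALE_STATUS,
	}
	fmt.Println(usable(st), st.GetState(), st.GetSubstate()) // true RUNNING STALE_STATUS
}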

func (*ClusterStatus) Descriptor

func (*ClusterStatus) Descriptor() ([]byte, []int)

Deprecated: Use ClusterStatus.ProtoReflect.Descriptor instead.

func (*ClusterStatus) GetDetail

func (x *ClusterStatus) GetDetail() string

func (*ClusterStatus) GetState

func (x *ClusterStatus) GetState() ClusterStatus_State

func (*ClusterStatus) GetStateStartTime

func (x *ClusterStatus) GetStateStartTime() *timestamppb.Timestamp

func (*ClusterStatus) GetSubstate

func (x *ClusterStatus) GetSubstate() ClusterStatus_Substate

func (*ClusterStatus) ProtoMessage

func (*ClusterStatus) ProtoMessage()

func (*ClusterStatus) ProtoReflect

func (x *ClusterStatus) ProtoReflect() protoreflect.Message

func (*ClusterStatus) Reset

func (x *ClusterStatus) Reset()

func (*ClusterStatus) String

func (x *ClusterStatus) String() string

ClusterStatus_State

type ClusterStatus_State int32

The cluster state.

ClusterStatus_UNKNOWN, ClusterStatus_CREATING, ClusterStatus_RUNNING, ClusterStatus_ERROR, ClusterStatus_ERROR_DUE_TO_UPDATE, ClusterStatus_DELETING, ClusterStatus_UPDATING, ClusterStatus_STOPPING, ClusterStatus_STOPPED, ClusterStatus_STARTING

const (
	// The cluster state is unknown.
	ClusterStatus_UNKNOWN ClusterStatus_State = 0
	// The cluster is being created and set up. It is not ready for use.
	ClusterStatus_CREATING ClusterStatus_State = 1
	// The cluster is currently running and healthy. It is ready for use.
	//
	// **Note:** The cluster state changes from "creating" to "running" status
	// after the master node(s), first two primary worker nodes (and the last
	// primary worker node if primary workers > 2) are running.
	ClusterStatus_RUNNING ClusterStatus_State = 2
	// The cluster encountered an error. It is not ready for use.
	ClusterStatus_ERROR ClusterStatus_State = 3
	// The cluster has encountered an error while being updated. Jobs can
	// be submitted to the cluster, but the cluster cannot be updated.
	ClusterStatus_ERROR_DUE_TO_UPDATE ClusterStatus_State = 9
	// The cluster is being deleted. It cannot be used.
	ClusterStatus_DELETING ClusterStatus_State = 4
	// The cluster is being updated. It continues to accept and process jobs.
	ClusterStatus_UPDATING ClusterStatus_State = 5
	// The cluster is being stopped. It cannot be used.
	ClusterStatus_STOPPING ClusterStatus_State = 6
	// The cluster is currently stopped. It is not ready for use.
	ClusterStatus_STOPPED ClusterStatus_State = 7
	// The cluster is being started. It is not ready for use.
	ClusterStatus_STARTING ClusterStatus_State = 8
)

func (ClusterStatus_State) Descriptor

func (ClusterStatus_State) Enum

func (ClusterStatus_State) EnumDescriptor

func (ClusterStatus_State) EnumDescriptor() ([]byte, []int)

Deprecated: Use ClusterStatus_State.Descriptor instead.

func (ClusterStatus_State) Number

func (ClusterStatus_State) String

func (x ClusterStatus_State) String() string

func (ClusterStatus_State) Type

ClusterStatus_Substate

type ClusterStatus_Substate int32

The cluster substate.

ClusterStatus_UNSPECIFIED, ClusterStatus_UNHEALTHY, ClusterStatus_STALE_STATUS

const (
	// The cluster substate is unknown.
	ClusterStatus_UNSPECIFIED ClusterStatus_Substate = 0
	// The cluster is known to be in an unhealthy state
	// (for example, critical daemons are not running or HDFS capacity is
	// exhausted).
	//
	// Applies to RUNNING state.
	ClusterStatus_UNHEALTHY ClusterStatus_Substate = 1
	// The agent-reported status is out of date (may occur if
	// Dataproc loses communication with Agent).
	//
	// Applies to RUNNING state.
	ClusterStatus_STALE_STATUS ClusterStatus_Substate = 2
)

func (ClusterStatus_Substate) Descriptor

func (ClusterStatus_Substate) Enum

func (ClusterStatus_Substate) EnumDescriptor

func (ClusterStatus_Substate) EnumDescriptor() ([]byte, []int)

Deprecated: Use ClusterStatus_Substate.Descriptor instead.

func (ClusterStatus_Substate) Number

func (ClusterStatus_Substate) String

func (x ClusterStatus_Substate) String() string

func (ClusterStatus_Substate) Type

Component

type Component int32

Cluster components that can be activated.

const (
	// Unspecified component. Specifying this will cause Cluster creation to fail.
	Component_COMPONENT_UNSPECIFIED Component = 0
	// The Anaconda python distribution. The Anaconda component is not supported
	// in the Dataproc 2.0 image. The 2.0 image is pre-installed with Miniconda.
	Component_ANACONDA Component = 5
	// Docker
	Component_DOCKER Component = 13
	// The Druid query engine. (alpha)
	Component_DRUID Component = 9
	// Flink
	Component_FLINK Component = 14
	// HBase. (beta)
	Component_HBASE Component = 11
	// The Hive Web HCatalog (the REST service for accessing HCatalog).
	Component_HIVE_WEBHCAT Component = 3
	// The Jupyter Notebook.
	Component_JUPYTER Component = 1
	// The Presto query engine.
	Component_PRESTO Component = 6
	// The Ranger service.
	Component_RANGER Component = 12
	// The Solr service.
	Component_SOLR Component = 10
	// The Zeppelin notebook.
	Component_ZEPPELIN Component = 4
	// The Zookeeper service.
	Component_ZOOKEEPER Component = 8
)
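Generated enums such as Component ship with name/value maps alongside the package variables; a hedged sketch that uses Component_value to validate user-supplied component names (the input names are arbitrary):

package main

import (
	"fmt"
	"strings"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

// parseComponents maps textual component names (for example, from a CLI flag)
// onto the Component enum using the generated Component_value map.
func parseComponents(names []string) ([]dataprocpb.Component, error) {
	var out []dataprocpb.Component
	for _, n := range names {
		v, ok := dataprocpb.Component_value[strings.ToUpper(n)]
		if !ok {
			return nil, fmt.Errorf("unknown component %q", n)
		}
		out = append(out, dataprocpb.Component(v))
	}
	return out, nil
}

func main() {
	comps, err := parseComponents([]string{"jupyter", "zeppelin"})
	if err != nil {
		panic(err)
	}
	fmt.Println(comps) // [JUPYTER ZEPPELIN]
}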

func (Component) Descriptor

func (Component) Descriptor() protoreflect.EnumDescriptor

func (Component) Enum

func (x Component) Enum() *Component

func (Component) EnumDescriptor

func (Component) EnumDescriptor() ([]byte, []int)

Deprecated: Use Component.Descriptor instead.

func (Component) Number

func (x Component) Number() protoreflect.EnumNumber

func (Component) String

func (x Component) String() string

func (Component) Type

ConfidentialInstanceConfig

type ConfidentialInstanceConfig struct {
	EnableConfidentialCompute bool "" /* 139 byte string literal not displayed */

}

Confidential Instance Config for clusters using Confidential VMs.

func (*ConfidentialInstanceConfig) Descriptor

func (*ConfidentialInstanceConfig) Descriptor() ([]byte, []int)

Deprecated: Use ConfidentialInstanceConfig.ProtoReflect.Descriptor instead.

func (*ConfidentialInstanceConfig) GetEnableConfidentialCompute

func (x *ConfidentialInstanceConfig) GetEnableConfidentialCompute() bool

func (*ConfidentialInstanceConfig) ProtoMessage

func (*ConfidentialInstanceConfig) ProtoMessage()

func (*ConfidentialInstanceConfig) ProtoReflect

func (*ConfidentialInstanceConfig) Reset

func (x *ConfidentialInstanceConfig) Reset()

func (*ConfidentialInstanceConfig) String

func (x *ConfidentialInstanceConfig) String() string

CreateAutoscalingPolicyRequest

type CreateAutoscalingPolicyRequest struct {

	// Required. The "resource name" of the region or location, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	//   - For `projects.regions.autoscalingPolicies.create`, the resource name
	//     of the region has the following format:
	//     `projects/{project_id}/regions/{region}`
	//
	//   - For `projects.locations.autoscalingPolicies.create`, the resource name
	//     of the location has the following format:
	//     `projects/{project_id}/locations/{location}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The autoscaling policy to create.
	Policy *AutoscalingPolicy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"`
	// contains filtered or unexported fields
}

A request to create an autoscaling policy.

func (*CreateAutoscalingPolicyRequest) Descriptor

func (*CreateAutoscalingPolicyRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateAutoscalingPolicyRequest.ProtoReflect.Descriptor instead.

func (*CreateAutoscalingPolicyRequest) GetParent

func (x *CreateAutoscalingPolicyRequest) GetParent() string

func (*CreateAutoscalingPolicyRequest) GetPolicy

func (*CreateAutoscalingPolicyRequest) ProtoMessage

func (*CreateAutoscalingPolicyRequest) ProtoMessage()

func (*CreateAutoscalingPolicyRequest) ProtoReflect

func (*CreateAutoscalingPolicyRequest) Reset

func (x *CreateAutoscalingPolicyRequest) Reset()

func (*CreateAutoscalingPolicyRequest) String

CreateBatchRequest

type CreateBatchRequest struct {

	// Required. The parent resource where this batch will be created.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The batch to create.
	Batch *Batch `protobuf:"bytes,2,opt,name=batch,proto3" json:"batch,omitempty"`
	// Optional. The ID to use for the batch, which will become the final component of
	// the batch's resource name.
	//
	// This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
	BatchId string `protobuf:"bytes,3,opt,name=batch_id,json=batchId,proto3" json:"batch_id,omitempty"`
	// Optional. A unique ID used to identify the request. If the service
	// receives two
	// [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
	// with the same request_id, the second request is ignored and the
	// Operation that corresponds to the first Batch created and stored
	// in the backend is returned.
	//
	// Recommendation: Set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The value must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// contains filtered or unexported fields
}

A request to create a batch workload.
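A hedged sketch of building this request and submitting it through the BatchControllerClient in the companion cloud.google.com/go/dataproc/apiv1 package; the endpoint, project, IDs, and UUID are placeholders, and the Batch workload itself is left unconfigured:

package main

import (
	"context"
	"log"

	dataproc "cloud.google.com/go/dataproc/apiv1"
	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// Batches are regional; the endpoint must match the region in Parent.
	client, err := dataproc.NewBatchControllerClient(ctx,
		option.WithEndpoint("us-central1-dataproc.googleapis.com:443"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	req := &dataprocpb.CreateBatchRequest{
		Parent:  "projects/my-project/locations/us-central1", // hypothetical project
		BatchId: "wordcount-001",                             // 4-63 chars, [a-z][0-9]-
		// RequestId makes retries idempotent; a UUID is the recommended value.
		RequestId: "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		Batch: &dataprocpb.Batch{
			// Configure the workload (Spark, PySpark, Spark SQL, ...) here.
		},
	}

	op, err := client.CreateBatch(ctx, req)
	if err != nil {
		log.Fatal(err)
	}
	batch, err := op.Wait(ctx) // blocks until the long-running operation completes
	if err != nil {
		log.Fatal(err)
	}
	log.Println("batch state:", batch.GetState())
}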

func (*CreateBatchRequest) Descriptor

func (*CreateBatchRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateBatchRequest.ProtoReflect.Descriptor instead.

func (*CreateBatchRequest) GetBatch

func (x *CreateBatchRequest) GetBatch() *Batch

func (*CreateBatchRequest) GetBatchId

func (x *CreateBatchRequest) GetBatchId() string

func (*CreateBatchRequest) GetParent

func (x *CreateBatchRequest) GetParent() string

func (*CreateBatchRequest) GetRequestId

func (x *CreateBatchRequest) GetRequestId() string

func (*CreateBatchRequest) ProtoMessage

func (*CreateBatchRequest) ProtoMessage()

func (*CreateBatchRequest) ProtoReflect

func (x *CreateBatchRequest) ProtoReflect() protoreflect.Message

func (*CreateBatchRequest) Reset

func (x *CreateBatchRequest) Reset()

func (*CreateBatchRequest) String

func (x *CreateBatchRequest) String() string

CreateClusterRequest

type CreateClusterRequest struct {
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`

	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`

	Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"`

	RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`

	ActionOnFailedPrimaryWorkers FailureAction "" /* 196 byte string literal not displayed */

}

A request to create a cluster.
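A hedged sketch of creating a small cluster with the companion cloud.google.com/go/dataproc/apiv1 client; Cluster, ClusterConfig, and InstanceGroupConfig are documented elsewhere in this package, and every identifier below is a placeholder:

package main

import (
	"context"
	"log"

	dataproc "cloud.google.com/go/dataproc/apiv1"
	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	client, err := dataproc.NewClusterControllerClient(ctx,
		option.WithEndpoint("us-central1-dataproc.googleapis.com:443"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	req := &dataprocpb.CreateClusterRequest{
		ProjectId: "my-project", // hypothetical project and region
		Region:    "us-central1",
		Cluster: &dataprocpb.Cluster{
			ProjectId:   "my-project",
			ClusterName: "example-cluster",
			Config: &dataprocpb.ClusterConfig{
				MasterConfig: &dataprocpb.InstanceGroupConfig{
					NumInstances:   1,
					MachineTypeUri: "n1-standard-4",
				},
				WorkerConfig: &dataprocpb.InstanceGroupConfig{
					NumInstances:   2,
					MachineTypeUri: "n1-standard-4",
				},
			},
		},
		// Clean up partially created VMs if primary workers fail to start.
		ActionOnFailedPrimaryWorkers: dataprocpb.FailureAction_DELETE,
	}

	op, err := client.CreateCluster(ctx, req)
	if err != nil {
		log.Fatal(err)
	}
	cluster, err := op.Wait(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("cluster state:", cluster.GetStatus().GetState())
}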

func (*CreateClusterRequest) Descriptor

func (*CreateClusterRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateClusterRequest.ProtoReflect.Descriptor instead.

func (*CreateClusterRequest) GetActionOnFailedPrimaryWorkers

func (x *CreateClusterRequest) GetActionOnFailedPrimaryWorkers() FailureAction

func (*CreateClusterRequest) GetCluster

func (x *CreateClusterRequest) GetCluster() *Cluster

func (*CreateClusterRequest) GetProjectId

func (x *CreateClusterRequest) GetProjectId() string

func (*CreateClusterRequest) GetRegion

func (x *CreateClusterRequest) GetRegion() string

func (*CreateClusterRequest) GetRequestId

func (x *CreateClusterRequest) GetRequestId() string

func (*CreateClusterRequest) ProtoMessage

func (*CreateClusterRequest) ProtoMessage()

func (*CreateClusterRequest) ProtoReflect

func (x *CreateClusterRequest) ProtoReflect() protoreflect.Message

func (*CreateClusterRequest) Reset

func (x *CreateClusterRequest) Reset()

func (*CreateClusterRequest) String

func (x *CreateClusterRequest) String() string

CreateNodeGroupRequest

type CreateNodeGroupRequest struct {

	// Required. The parent resource where this node group will be created.
	// Format: `projects/{project}/regions/{region}/clusters/{cluster}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The node group to create.
	NodeGroup *NodeGroup `protobuf:"bytes,2,opt,name=node_group,json=nodeGroup,proto3" json:"node_group,omitempty"`
	// Optional. A node group ID. Generated if not specified.
	//
	// The ID must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). Cannot begin or end with an underscore
	// or hyphen. Must consist of 3 to 33 characters.
	NodeGroupId string `protobuf:"bytes,4,opt,name=node_group_id,json=nodeGroupId,proto3" json:"node_group_id,omitempty"`
	// Optional. A unique ID used to identify the request. If the server receives
	// two
	// [CreateNodeGroupRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequests)
	// with the same ID, the second request is ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created
	// and stored in the backend is returned.
	//
	// Recommendation: Set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The ID must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// contains filtered or unexported fields
}

A request to create a node group.
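A minimal sketch of the request message for a dedicated driver pool; NodeGroup and InstanceGroupConfig are documented elsewhere in this package, the identifiers are placeholders, and the request would be passed to the node group controller client in the companion package:

package main

import (
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	// A driver node group with two n1-standard-8 nodes.
	req := &dataprocpb.CreateNodeGroupRequest{
		Parent:      "projects/my-project/regions/us-central1/clusters/example-cluster",
		NodeGroupId: "driver-pool-1", // optional; generated when omitted
		RequestId:   "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		NodeGroup: &dataprocpb.NodeGroup{
			Roles: []dataprocpb.NodeGroup_Role{dataprocpb.NodeGroup_DRIVER},
			NodeGroupConfig: &dataprocpb.InstanceGroupConfig{
				NumInstances:   2,
				MachineTypeUri: "n1-standard-8",
			},
		},
	}
	fmt.Println(req.GetNodeGroupId())
}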

func (*CreateNodeGroupRequest) Descriptor

func (*CreateNodeGroupRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateNodeGroupRequest.ProtoReflect.Descriptor instead.

func (*CreateNodeGroupRequest) GetNodeGroup

func (x *CreateNodeGroupRequest) GetNodeGroup() *NodeGroup

func (*CreateNodeGroupRequest) GetNodeGroupId

func (x *CreateNodeGroupRequest) GetNodeGroupId() string

func (*CreateNodeGroupRequest) GetParent

func (x *CreateNodeGroupRequest) GetParent() string

func (*CreateNodeGroupRequest) GetRequestId

func (x *CreateNodeGroupRequest) GetRequestId() string

func (*CreateNodeGroupRequest) ProtoMessage

func (*CreateNodeGroupRequest) ProtoMessage()

func (*CreateNodeGroupRequest) ProtoReflect

func (x *CreateNodeGroupRequest) ProtoReflect() protoreflect.Message

func (*CreateNodeGroupRequest) Reset

func (x *CreateNodeGroupRequest) Reset()

func (*CreateNodeGroupRequest) String

func (x *CreateNodeGroupRequest) String() string

CreateWorkflowTemplateRequest

type CreateWorkflowTemplateRequest struct {

	// Required. The resource name of the region or location, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	//   - For `projects.regions.workflowTemplates.create`, the resource name of the
	//     region has the following format:
	//     `projects/{project_id}/regions/{region}`
	//
	//   - For `projects.locations.workflowTemplates.create`, the resource name of
	//     the location has the following format:
	//     `projects/{project_id}/locations/{location}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The Dataproc workflow template to create.
	Template *WorkflowTemplate `protobuf:"bytes,2,opt,name=template,proto3" json:"template,omitempty"`
	// contains filtered or unexported fields
}

A request to create a workflow template.

func (*CreateWorkflowTemplateRequest) Descriptor

func (*CreateWorkflowTemplateRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateWorkflowTemplateRequest.ProtoReflect.Descriptor instead.

func (*CreateWorkflowTemplateRequest) GetParent

func (x *CreateWorkflowTemplateRequest) GetParent() string

func (*CreateWorkflowTemplateRequest) GetTemplate

func (*CreateWorkflowTemplateRequest) ProtoMessage

func (*CreateWorkflowTemplateRequest) ProtoMessage()

func (*CreateWorkflowTemplateRequest) ProtoReflect

func (*CreateWorkflowTemplateRequest) Reset

func (x *CreateWorkflowTemplateRequest) Reset()

func (*CreateWorkflowTemplateRequest) String

DataprocMetricConfig

type DataprocMetricConfig struct {

	// Required. Metrics sources to enable.
	Metrics []*DataprocMetricConfig_Metric `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty"`
	// contains filtered or unexported fields
}

Dataproc metric config.
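A short sketch of a metric configuration that collects default Spark metrics plus a single overridden YARN metric; the override string is hypothetical:

package main

import (
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	cfg := &dataprocpb.DataprocMetricConfig{
		Metrics: []*dataprocpb.DataprocMetricConfig_Metric{
			{MetricSource: dataprocpb.DataprocMetricConfig_SPARK},
			{
				MetricSource: dataprocpb.DataprocMetricConfig_YARN,
				// Hypothetical override; when set, only the listed metrics
				// are collected for this source.
				MetricOverrides: []string{"yarn:ResourceManager:QueueMetrics:AppsRunning"},
			},
		},
	}
	for _, m := range cfg.GetMetrics() {
		fmt.Println(m.GetMetricSource(), m.GetMetricOverrides())
	}
}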

func (*DataprocMetricConfig) Descriptor

func (*DataprocMetricConfig) Descriptor() ([]byte, []int)

Deprecated: Use DataprocMetricConfig.ProtoReflect.Descriptor instead.

func (*DataprocMetricConfig) GetMetrics

func (*DataprocMetricConfig) ProtoMessage

func (*DataprocMetricConfig) ProtoMessage()

func (*DataprocMetricConfig) ProtoReflect

func (x *DataprocMetricConfig) ProtoReflect() protoreflect.Message

func (*DataprocMetricConfig) Reset

func (x *DataprocMetricConfig) Reset()

func (*DataprocMetricConfig) String

func (x *DataprocMetricConfig) String() string

DataprocMetricConfig_Metric

type DataprocMetricConfig_Metric struct {
	MetricSource DataprocMetricConfig_MetricSource "" /* 162 byte string literal not displayed */

	MetricOverrides []string `protobuf:"bytes,2,rep,name=metric_overrides,json=metricOverrides,proto3" json:"metric_overrides,omitempty"`

}

A Dataproc OSS metric.

func (*DataprocMetricConfig_Metric) Descriptor

func (*DataprocMetricConfig_Metric) Descriptor() ([]byte, []int)

Deprecated: Use DataprocMetricConfig_Metric.ProtoReflect.Descriptor instead.

func (*DataprocMetricConfig_Metric) GetMetricOverrides

func (x *DataprocMetricConfig_Metric) GetMetricOverrides() []string

func (*DataprocMetricConfig_Metric) GetMetricSource

func (*DataprocMetricConfig_Metric) ProtoMessage

func (*DataprocMetricConfig_Metric) ProtoMessage()

func (*DataprocMetricConfig_Metric) ProtoReflect

func (*DataprocMetricConfig_Metric) Reset

func (x *DataprocMetricConfig_Metric) Reset()

func (*DataprocMetricConfig_Metric) String

func (x *DataprocMetricConfig_Metric) String() string

DataprocMetricConfig_MetricSource

type DataprocMetricConfig_MetricSource int32

A source for the collection of Dataproc OSS metrics (see available OSS metrics).

DataprocMetricConfig_METRIC_SOURCE_UNSPECIFIED, DataprocMetricConfig_MONITORING_AGENT_DEFAULTS, DataprocMetricConfig_HDFS, DataprocMetricConfig_SPARK, DataprocMetricConfig_YARN, DataprocMetricConfig_SPARK_HISTORY_SERVER, DataprocMetricConfig_HIVESERVER2

const (
	// Required unspecified metric source.
	DataprocMetricConfig_METRIC_SOURCE_UNSPECIFIED DataprocMetricConfig_MetricSource = 0
	// Default monitoring agent metrics. If this source is enabled,
	// Dataproc enables the monitoring agent in Compute Engine,
	// and collects default monitoring agent metrics, which are published
	// with an `agent.googleapis.com` prefix.
	DataprocMetricConfig_MONITORING_AGENT_DEFAULTS DataprocMetricConfig_MetricSource = 1
	// HDFS metric source.
	DataprocMetricConfig_HDFS DataprocMetricConfig_MetricSource = 2
	// Spark metric source.
	DataprocMetricConfig_SPARK DataprocMetricConfig_MetricSource = 3
	// YARN metric source.
	DataprocMetricConfig_YARN DataprocMetricConfig_MetricSource = 4
	// Spark History Server metric source.
	DataprocMetricConfig_SPARK_HISTORY_SERVER DataprocMetricConfig_MetricSource = 5
	// Hiveserver2 metric source.
	DataprocMetricConfig_HIVESERVER2 DataprocMetricConfig_MetricSource = 6
)

func (DataprocMetricConfig_MetricSource) Descriptor

func (DataprocMetricConfig_MetricSource) Enum

func (DataprocMetricConfig_MetricSource) EnumDescriptor

func (DataprocMetricConfig_MetricSource) EnumDescriptor() ([]byte, []int)

Deprecated: Use DataprocMetricConfig_MetricSource.Descriptor instead.

func (DataprocMetricConfig_MetricSource) Number

func (DataprocMetricConfig_MetricSource) String

func (DataprocMetricConfig_MetricSource) Type

DeleteAutoscalingPolicyRequest

type DeleteAutoscalingPolicyRequest struct {

	// Required. The "resource name" of the autoscaling policy, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	//   - For `projects.regions.autoscalingPolicies.delete`, the resource name
	//     of the policy has the following format:
	//     `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`
	//
	//   - For `projects.locations.autoscalingPolicies.delete`, the resource name
	//     of the policy has the following format:
	//     `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// contains filtered or unexported fields
}

A request to delete an autoscaling policy.

Autoscaling policies in use by one or more clusters will not be deleted.

func (*DeleteAutoscalingPolicyRequest) Descriptor

func (*DeleteAutoscalingPolicyRequest) Descriptor() ([]byte, []int)

Deprecated: Use DeleteAutoscalingPolicyRequest.ProtoReflect.Descriptor instead.

func (*DeleteAutoscalingPolicyRequest) GetName

func (*DeleteAutoscalingPolicyRequest) ProtoMessage

func (*DeleteAutoscalingPolicyRequest) ProtoMessage()

func (*DeleteAutoscalingPolicyRequest) ProtoReflect

func (*DeleteAutoscalingPolicyRequest) Reset

func (x *DeleteAutoscalingPolicyRequest) Reset()

func (*DeleteAutoscalingPolicyRequest) String

DeleteBatchRequest

type DeleteBatchRequest struct {

	// Required. The name of the batch resource to delete.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// contains filtered or unexported fields
}

A request to delete a batch workload.

func (*DeleteBatchRequest) Descriptor

func (*DeleteBatchRequest) Descriptor() ([]byte, []int)

Deprecated: Use DeleteBatchRequest.ProtoReflect.Descriptor instead.

func (*DeleteBatchRequest) GetName

func (x *DeleteBatchRequest) GetName() string

func (*DeleteBatchRequest) ProtoMessage

func (*DeleteBatchRequest) ProtoMessage()

func (*DeleteBatchRequest) ProtoReflect

func (x *DeleteBatchRequest) ProtoReflect() protoreflect.Message

func (*DeleteBatchRequest) Reset

func (x *DeleteBatchRequest) Reset()

func (*DeleteBatchRequest) String

func (x *DeleteBatchRequest) String() string

DeleteClusterRequest

type DeleteClusterRequest struct {

	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Optional. Specifying the `cluster_uuid` means the RPC should fail
	// (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
	ClusterUuid string `protobuf:"bytes,4,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Optional. A unique ID used to identify the request. If the server
	// receives two
	// [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
	// with the same id, then the second request will be ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created
	// and stored in the backend is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The ID must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// contains filtered or unexported fields
}

A request to delete a cluster.
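A hedged sketch of deleting a cluster with the companion cloud.google.com/go/dataproc/apiv1 client; all identifiers and UUIDs below are placeholders:

package main

import (
	"context"
	"log"

	dataproc "cloud.google.com/go/dataproc/apiv1"
	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	client, err := dataproc.NewClusterControllerClient(ctx,
		option.WithEndpoint("us-central1-dataproc.googleapis.com:443"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	op, err := client.DeleteCluster(ctx, &dataprocpb.DeleteClusterRequest{
		ProjectId:   "my-project", // hypothetical project, region, and names
		Region:      "us-central1",
		ClusterName: "example-cluster",
		// Fail with NOT_FOUND if the cluster was deleted and recreated under
		// the same name since this UUID was recorded.
		ClusterUuid: "b790f2fe-c87a-4f0c-9f9d-123456789abc",
		RequestId:   "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := op.Wait(ctx); err != nil { // delete operations carry no payload
		log.Fatal(err)
	}
	log.Println("cluster deleted")
}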

func (*DeleteClusterRequest) Descriptor

func (*DeleteClusterRequest) Descriptor() ([]byte, []int)

Deprecated: Use DeleteClusterRequest.ProtoReflect.Descriptor instead.

func (*DeleteClusterRequest) GetClusterName

func (x *DeleteClusterRequest) GetClusterName() string

func (*DeleteClusterRequest) GetClusterUuid

func (x *DeleteClusterRequest) GetClusterUuid() string

func (*DeleteClusterRequest) GetProjectId

func (x *DeleteClusterRequest) GetProjectId() string

func (*DeleteClusterRequest) GetRegion

func (x *DeleteClusterRequest) GetRegion() string

func (*DeleteClusterRequest) GetRequestId

func (x *DeleteClusterRequest) GetRequestId() string

func (*DeleteClusterRequest) ProtoMessage

func (*DeleteClusterRequest) ProtoMessage()

func (*DeleteClusterRequest) ProtoReflect

func (x *DeleteClusterRequest) ProtoReflect() protoreflect.Message

func (*DeleteClusterRequest) Reset

func (x *DeleteClusterRequest) Reset()

func (*DeleteClusterRequest) String

func (x *DeleteClusterRequest) String() string

DeleteJobRequest

type DeleteJobRequest struct {

	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// contains filtered or unexported fields
}

A request to delete a job.

func (*DeleteJobRequest) Descriptor

func (*DeleteJobRequest) Descriptor() ([]byte, []int)

Deprecated: Use DeleteJobRequest.ProtoReflect.Descriptor instead.

func (*DeleteJobRequest) GetJobId

func (x *DeleteJobRequest) GetJobId() string

func (*DeleteJobRequest) GetProjectId

func (x *DeleteJobRequest) GetProjectId() string

func (*DeleteJobRequest) GetRegion

func (x *DeleteJobRequest) GetRegion() string

func (*DeleteJobRequest) ProtoMessage

func (*DeleteJobRequest) ProtoMessage()

func (*DeleteJobRequest) ProtoReflect

func (x *DeleteJobRequest) ProtoReflect() protoreflect.Message

func (*DeleteJobRequest) Reset

func (x *DeleteJobRequest) Reset()

func (*DeleteJobRequest) String

func (x *DeleteJobRequest) String() string

DeleteWorkflowTemplateRequest

type DeleteWorkflowTemplateRequest struct {

	// Required. The resource name of the workflow template, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.workflowTemplates.delete`, the resource name
	// of the template has the following format:
	//
	//	`projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	//
	//   - For `projects.locations.workflowTemplates.delete`, the resource name
	//     of the template has the following format:
	//     `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Optional. The version of the workflow template to delete. If specified,
	// the request will only delete the template if the current server version
	// matches the specified version.
	Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	// contains filtered or unexported fields
}

A request to delete a workflow template.

Currently started workflows will remain running.

func (*DeleteWorkflowTemplateRequest) Descriptor

func (*DeleteWorkflowTemplateRequest) Descriptor() ([]byte, []int)

Deprecated: Use DeleteWorkflowTemplateRequest.ProtoReflect.Descriptor instead.

func (*DeleteWorkflowTemplateRequest) GetName

func (*DeleteWorkflowTemplateRequest) GetVersion

func (x *DeleteWorkflowTemplateRequest) GetVersion() int32

func (*DeleteWorkflowTemplateRequest) ProtoMessage

func (*DeleteWorkflowTemplateRequest) ProtoMessage()

func (*DeleteWorkflowTemplateRequest) ProtoReflect

func (*DeleteWorkflowTemplateRequest) Reset

func (x *DeleteWorkflowTemplateRequest) Reset()

func (*DeleteWorkflowTemplateRequest) String

DiagnoseClusterRequest

type DiagnoseClusterRequest struct {

	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// contains filtered or unexported fields
}

A request to collect cluster diagnostic information.

func (*DiagnoseClusterRequest) Descriptor

func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int)

Deprecated: Use DiagnoseClusterRequest.ProtoReflect.Descriptor instead.

func (*DiagnoseClusterRequest) GetClusterName

func (x *DiagnoseClusterRequest) GetClusterName() string

func (*DiagnoseClusterRequest) GetProjectId

func (x *DiagnoseClusterRequest) GetProjectId() string

func (*DiagnoseClusterRequest) GetRegion

func (x *DiagnoseClusterRequest) GetRegion() string

func (*DiagnoseClusterRequest) ProtoMessage

func (*DiagnoseClusterRequest) ProtoMessage()

func (*DiagnoseClusterRequest) ProtoReflect

func (x *DiagnoseClusterRequest) ProtoReflect() protoreflect.Message

func (*DiagnoseClusterRequest) Reset

func (x *DiagnoseClusterRequest) Reset()

func (*DiagnoseClusterRequest) String

func (x *DiagnoseClusterRequest) String() string

DiagnoseClusterResults

type DiagnoseClusterResults struct {

	// Output only. The Cloud Storage URI of the diagnostic output.
	// The output report is a plain text file with a summary of collected
	// diagnostics.
	OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
	// contains filtered or unexported fields
}

The location of diagnostic output.
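A hedged sketch of the diagnose flow with the companion cloud.google.com/go/dataproc/apiv1 client, assuming its DiagnoseCluster operation resolves to a DiagnoseClusterResults value; the identifiers are placeholders:

package main

import (
	"context"
	"log"

	dataproc "cloud.google.com/go/dataproc/apiv1"
	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	client, err := dataproc.NewClusterControllerClient(ctx,
		option.WithEndpoint("us-central1-dataproc.googleapis.com:443"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	op, err := client.DiagnoseCluster(ctx, &dataprocpb.DiagnoseClusterRequest{
		ProjectId:   "my-project", // hypothetical identifiers
		Region:      "us-central1",
		ClusterName: "example-cluster",
	})
	if err != nil {
		log.Fatal(err)
	}
	results, err := op.Wait(ctx) // completes with the diagnostic results
	if err != nil {
		log.Fatal(err)
	}
	log.Println("diagnostic report:", results.GetOutputUri())
}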

func (*DiagnoseClusterResults) Descriptor

func (*DiagnoseClusterResults) Descriptor() ([]byte, []int)

Deprecated: Use DiagnoseClusterResults.ProtoReflect.Descriptor instead.

func (*DiagnoseClusterResults) GetOutputUri

func (x *DiagnoseClusterResults) GetOutputUri() string

func (*DiagnoseClusterResults) ProtoMessage

func (*DiagnoseClusterResults) ProtoMessage()

func (*DiagnoseClusterResults) ProtoReflect

func (x *DiagnoseClusterResults) ProtoReflect() protoreflect.Message

func (*DiagnoseClusterResults) Reset

func (x *DiagnoseClusterResults) Reset()

func (*DiagnoseClusterResults) String

func (x *DiagnoseClusterResults) String() string

DiskConfig

type DiskConfig struct {

	// Optional. Type of the boot disk (default is "pd-standard").
	// Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive),
	// "pd-ssd" (Persistent Disk Solid State Drive),
	// or "pd-standard" (Persistent Disk Hard Disk Drive).
	// See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).
	BootDiskType string `protobuf:"bytes,3,opt,name=boot_disk_type,json=bootDiskType,proto3" json:"boot_disk_type,omitempty"`
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb int32 `protobuf:"varint,1,opt,name=boot_disk_size_gb,json=bootDiskSizeGb,proto3" json:"boot_disk_size_gb,omitempty"`
	// Optional. Number of attached SSDs, from 0 to 8 (default is 0).
	// If SSDs are not attached, the boot disk is used to store runtime logs and
	// [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
	// If one or more SSDs are attached, this runtime bulk
	// data is spread across them, and the boot disk contains only basic
	// config and installed binaries.
	NumLocalSsds int32 `protobuf:"varint,2,opt,name=num_local_ssds,json=numLocalSsds,proto3" json:"num_local_ssds,omitempty"`
	// Optional. Interface type of local SSDs (default is "scsi").
	// Valid values: "scsi" (Small Computer System Interface),
	// "nvme" (Non-Volatile Memory Express).
	// See [local SSD
	// performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
	LocalSsdInterface string `protobuf:"bytes,4,opt,name=local_ssd_interface,json=localSsdInterface,proto3" json:"local_ssd_interface,omitempty"`
	// contains filtered or unexported fields
}

Specifies the config of disk options for a group of VM instances.
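A short sketch of a disk configuration attached to a worker group; InstanceGroupConfig is documented elsewhere in this package, and the sizes and types are arbitrary:

package main

import (
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	// Workers with SSD boot disks and two NVMe local SSDs for shuffle/HDFS data.
	workers := &dataprocpb.InstanceGroupConfig{
		NumInstances:   4,
		MachineTypeUri: "n2-standard-8",
		DiskConfig: &dataprocpb.DiskConfig{
			BootDiskType:      "pd-ssd",
			BootDiskSizeGb:    200,
			NumLocalSsds:      2,
			LocalSsdInterface: "nvme",
		},
	}
	fmt.Println(workers.GetDiskConfig().GetBootDiskType())
}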

func (*DiskConfig) Descriptor

func (*DiskConfig) Descriptor() ([]byte, []int)

Deprecated: Use DiskConfig.ProtoReflect.Descriptor instead.

func (*DiskConfig) GetBootDiskSizeGb

func (x *DiskConfig) GetBootDiskSizeGb() int32

func (*DiskConfig) GetBootDiskType

func (x *DiskConfig) GetBootDiskType() string

func (*DiskConfig) GetLocalSsdInterface

func (x *DiskConfig) GetLocalSsdInterface() string

func (*DiskConfig) GetNumLocalSsds

func (x *DiskConfig) GetNumLocalSsds() int32

func (*DiskConfig) ProtoMessage

func (*DiskConfig) ProtoMessage()

func (*DiskConfig) ProtoReflect

func (x *DiskConfig) ProtoReflect() protoreflect.Message

func (*DiskConfig) Reset

func (x *DiskConfig) Reset()

func (*DiskConfig) String

func (x *DiskConfig) String() string

DriverSchedulingConfig

type DriverSchedulingConfig struct {

	// Required. The amount of memory in MB the driver is requesting.
	MemoryMb int32 `protobuf:"varint,1,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb,omitempty"`
	// Required. The number of vCPUs the driver is requesting.
	Vcores int32 `protobuf:"varint,2,opt,name=vcores,proto3" json:"vcores,omitempty"`
	// contains filtered or unexported fields
}

Driver scheduling configuration.

func (*DriverSchedulingConfig) Descriptor

func (*DriverSchedulingConfig) Descriptor() ([]byte, []int)

Deprecated: Use DriverSchedulingConfig.ProtoReflect.Descriptor instead.

func (*DriverSchedulingConfig) GetMemoryMb

func (x *DriverSchedulingConfig) GetMemoryMb() int32

func (*DriverSchedulingConfig) GetVcores

func (x *DriverSchedulingConfig) GetVcores() int32

func (*DriverSchedulingConfig) ProtoMessage

func (*DriverSchedulingConfig) ProtoMessage()

func (*DriverSchedulingConfig) ProtoReflect

func (x *DriverSchedulingConfig) ProtoReflect() protoreflect.Message

func (*DriverSchedulingConfig) Reset

func (x *DriverSchedulingConfig) Reset()

func (*DriverSchedulingConfig) String

func (x *DriverSchedulingConfig) String() string

EncryptionConfig

type EncryptionConfig struct {

	// Optional. The Cloud KMS key name to use for PD disk encryption for all
	// instances in the cluster.
	GcePdKmsKeyName string `protobuf:"bytes,1,opt,name=gce_pd_kms_key_name,json=gcePdKmsKeyName,proto3" json:"gce_pd_kms_key_name,omitempty"`
	// contains filtered or unexported fields
}

Encryption settings for the cluster.

func (*EncryptionConfig) Descriptor

func (*EncryptionConfig) Descriptor() ([]byte, []int)

Deprecated: Use EncryptionConfig.ProtoReflect.Descriptor instead.

func (*EncryptionConfig) GetGcePdKmsKeyName

func (x *EncryptionConfig) GetGcePdKmsKeyName() string

func (*EncryptionConfig) ProtoMessage

func (*EncryptionConfig) ProtoMessage()

func (*EncryptionConfig) ProtoReflect

func (x *EncryptionConfig) ProtoReflect() protoreflect.Message

func (*EncryptionConfig) Reset

func (x *EncryptionConfig) Reset()

func (*EncryptionConfig) String

func (x *EncryptionConfig) String() string

EndpointConfig

type EndpointConfig struct {
	HttpPorts map[string]string "" /* 176 byte string literal not displayed */

	EnableHttpPortAccess bool `protobuf:"varint,2,opt,name=enable_http_port_access,json=enableHttpPortAccess,proto3" json:"enable_http_port_access,omitempty"`

}

Endpoint config for this cluster.
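A minimal sketch; only EnableHttpPortAccess is set by the caller, while HttpPorts is populated by the service once the cluster is running:

package main

import (
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	// Request web-UI access for the cluster's components.
	ep := &dataprocpb.EndpointConfig{EnableHttpPortAccess: true}
	for name, url := range ep.GetHttpPorts() { // empty until returned by the API
		fmt.Println(name, url)
	}
}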

func (*EndpointConfig) Descriptor

func (*EndpointConfig) Descriptor() ([]byte, []int)

Deprecated: Use EndpointConfig.ProtoReflect.Descriptor instead.

func (*EndpointConfig) GetEnableHttpPortAccess

func (x *EndpointConfig) GetEnableHttpPortAccess() bool

func (*EndpointConfig) GetHttpPorts

func (x *EndpointConfig) GetHttpPorts() map[string]string

func (*EndpointConfig) ProtoMessage

func (*EndpointConfig) ProtoMessage()

func (*EndpointConfig) ProtoReflect

func (x *EndpointConfig) ProtoReflect() protoreflect.Message

func (*EndpointConfig) Reset

func (x *EndpointConfig) Reset()

func (*EndpointConfig) String

func (x *EndpointConfig) String() string

EnvironmentConfig

type EnvironmentConfig struct {

	// Optional. Execution configuration for a workload.
	ExecutionConfig *ExecutionConfig `protobuf:"bytes,1,opt,name=execution_config,json=executionConfig,proto3" json:"execution_config,omitempty"`
	// Optional. Peripherals configuration that workload has access to.
	PeripheralsConfig *PeripheralsConfig `protobuf:"bytes,2,opt,name=peripherals_config,json=peripheralsConfig,proto3" json:"peripherals_config,omitempty"`
	// contains filtered or unexported fields
}

Environment configuration for a workload.

func (*EnvironmentConfig) Descriptor

func (*EnvironmentConfig) Descriptor() ([]byte, []int)

Deprecated: Use EnvironmentConfig.ProtoReflect.Descriptor instead.

func (*EnvironmentConfig) GetExecutionConfig

func (x *EnvironmentConfig) GetExecutionConfig() *ExecutionConfig

func (*EnvironmentConfig) GetPeripheralsConfig

func (x *EnvironmentConfig) GetPeripheralsConfig() *PeripheralsConfig

func (*EnvironmentConfig) ProtoMessage

func (*EnvironmentConfig) ProtoMessage()

func (*EnvironmentConfig) ProtoReflect

func (x *EnvironmentConfig) ProtoReflect() protoreflect.Message

func (*EnvironmentConfig) Reset

func (x *EnvironmentConfig) Reset()

func (*EnvironmentConfig) String

func (x *EnvironmentConfig) String() string

ExecutionConfig

type ExecutionConfig struct {

	// Optional. Service account used to execute the workload.
	ServiceAccount string `protobuf:"bytes,2,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"`
	// Network configuration for workload execution.
	//
	// Types that are assignable to Network:
	//
	//	*ExecutionConfig_NetworkUri
	//	*ExecutionConfig_SubnetworkUri
	Network isExecutionConfig_Network `protobuf_oneof:"network"`
	// Optional. Tags used for network traffic control.
	NetworkTags []string `protobuf:"bytes,6,rep,name=network_tags,json=networkTags,proto3" json:"network_tags,omitempty"`
	// Optional. The Cloud KMS key to use for encryption.
	KmsKey string `protobuf:"bytes,7,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"`
	// contains filtered or unexported fields
}

Execution configuration for a workload.

func (*ExecutionConfig) Descriptor

func (*ExecutionConfig) Descriptor() ([]byte, []int)

Deprecated: Use ExecutionConfig.ProtoReflect.Descriptor instead.

func (*ExecutionConfig) GetKmsKey

func (x *ExecutionConfig) GetKmsKey() string

func (*ExecutionConfig) GetNetwork

func (m *ExecutionConfig) GetNetwork() isExecutionConfig_Network

func (*ExecutionConfig) GetNetworkTags

func (x *ExecutionConfig) GetNetworkTags() []string

func (*ExecutionConfig) GetNetworkUri

func (x *ExecutionConfig) GetNetworkUri() string

func (*ExecutionConfig) GetServiceAccount

func (x *ExecutionConfig) GetServiceAccount() string

func (*ExecutionConfig) GetSubnetworkUri

func (x *ExecutionConfig) GetSubnetworkUri() string

func (*ExecutionConfig) ProtoMessage

func (*ExecutionConfig) ProtoMessage()

func (*ExecutionConfig) ProtoReflect

func (x *ExecutionConfig) ProtoReflect() protoreflect.Message

func (*ExecutionConfig) Reset

func (x *ExecutionConfig) Reset()

func (*ExecutionConfig) String

func (x *ExecutionConfig) String() string

ExecutionConfig_NetworkUri

type ExecutionConfig_NetworkUri struct {
	// Optional. Network URI to connect workload to.
	NetworkUri string `protobuf:"bytes,4,opt,name=network_uri,json=networkUri,proto3,oneof"`
}

ExecutionConfig_SubnetworkUri

type ExecutionConfig_SubnetworkUri struct {
	// Optional. Subnetwork URI to connect workload to.
	SubnetworkUri string `protobuf:"bytes,5,opt,name=subnetwork_uri,json=subnetworkUri,proto3,oneof"`
}
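The Network oneof above is populated through its wrapper types; a short sketch, with hypothetical project and service account names:

package main

import (
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	exec := &dataprocpb.ExecutionConfig{
		ServiceAccount: "workload-runner@my-project.iam.gserviceaccount.com", // hypothetical
		// The network oneof is set through one of its wrapper types; here the
		// workload attaches to a subnetwork rather than a full network URI.
		Network: &dataprocpb.ExecutionConfig_SubnetworkUri{
			SubnetworkUri: "projects/my-project/regions/us-central1/subnetworks/default",
		},
		NetworkTags: []string{"dataproc-serverless"},
	}
	// The typed getter reads through the oneof and returns "" if the other
	// branch (or neither) is set.
	fmt.Println(exec.GetSubnetworkUri())
}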

FailureAction

type FailureAction int32

Actions in response to failure of a resource associated with a cluster.

FailureAction_FAILURE_ACTION_UNSPECIFIED, FailureAction_NO_ACTION, FailureAction_DELETE

const (
	// When FailureAction is unspecified, failure action defaults to NO_ACTION.
	FailureAction_FAILURE_ACTION_UNSPECIFIED FailureAction = 0
	// Take no action on failure to create a cluster resource. NO_ACTION is the
	// default.
	FailureAction_NO_ACTION FailureAction = 1
	// Delete the failed cluster resource.
	FailureAction_DELETE FailureAction = 2
)

func (FailureAction) Descriptor

func (FailureAction) Enum

func (x FailureAction) Enum() *FailureAction

func (FailureAction) EnumDescriptor

func (FailureAction) EnumDescriptor() ([]byte, []int)

Deprecated: Use FailureAction.Descriptor instead.

func (FailureAction) Number

func (FailureAction) String

func (x FailureAction) String() string

func (FailureAction) Type

GceClusterConfig

type GceClusterConfig struct {
	ZoneUri string `protobuf:"bytes,1,opt,name=zone_uri,json=zoneUri,proto3" json:"zone_uri,omitempty"`

	NetworkUri string `protobuf:"bytes,2,opt,name=network_uri,json=networkUri,proto3" json:"network_uri,omitempty"`

	SubnetworkUri string `protobuf:"bytes,6,opt,name=subnetwork_uri,json=subnetworkUri,proto3" json:"subnetwork_uri,omitempty"`

	InternalIpOnly bool `protobuf:"varint,7,opt,name=internal_ip_only,json=internalIpOnly,proto3" json:"internal_ip_only,omitempty"`

	PrivateIpv6GoogleAccess GceClusterConfig_PrivateIpv6GoogleAccess "" /* 207 byte string literal not displayed */

	ServiceAccount string `protobuf:"bytes,8,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"`

	ServiceAccountScopes []string `protobuf:"bytes,3,rep,name=service_account_scopes,json=serviceAccountScopes,proto3" json:"service_account_scopes,omitempty"`

	Tags []string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty"`

	Metadata map[string]string "" /* 157 byte string literal not displayed */

	ReservationAffinity *ReservationAffinity `protobuf:"bytes,11,opt,name=reservation_affinity,json=reservationAffinity,proto3" json:"reservation_affinity,omitempty"`

	NodeGroupAffinity *NodeGroupAffinity `protobuf:"bytes,13,opt,name=node_group_affinity,json=nodeGroupAffinity,proto3" json:"node_group_affinity,omitempty"`

	ShieldedInstanceConfig *ShieldedInstanceConfig "" /* 130 byte string literal not displayed */

	ConfidentialInstanceConfig *ConfidentialInstanceConfig "" /* 142 byte string literal not displayed */

}

Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.
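A hedged sketch of a typical internal-IP-only configuration; every URI, account, and metadata entry below is a placeholder:

package main

import (
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	// An internal-IP-only cluster on a shared subnetwork, inheriting the
	// subnetwork's private IPv6 access setting.
	gce := &dataprocpb.GceClusterConfig{
		ZoneUri:                 "us-central1-b",
		SubnetworkUri:           "projects/my-project/regions/us-central1/subnetworks/default",
		InternalIpOnly:          true,
		PrivateIpv6GoogleAccess: dataprocpb.GceClusterConfig_INHERIT_FROM_SUBNETWORK,
		ServiceAccount:          "cluster-vm@my-project.iam.gserviceaccount.com",
		ServiceAccountScopes:    []string{"https://www.googleapis.com/auth/cloud-platform"},
		Tags:                    []string{"dataproc"},
		Metadata:                map[string]string{"enable-oslogin": "true"},
	}
	fmt.Println(gce.GetPrivateIpv6GoogleAccess())
}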

func (*GceClusterConfig) Descriptor

func (*GceClusterConfig) Descriptor() ([]byte, []int)

Deprecated: Use GceClusterConfig.ProtoReflect.Descriptor instead.

func (*GceClusterConfig) GetConfidentialInstanceConfig

func (x *GceClusterConfig) GetConfidentialInstanceConfig() *ConfidentialInstanceConfig

func (*GceClusterConfig) GetInternalIpOnly

func (x *GceClusterConfig) GetInternalIpOnly() bool

func (*GceClusterConfig) GetMetadata

func (x *GceClusterConfig) GetMetadata() map[string]string

func (*GceClusterConfig) GetNetworkUri

func (x *GceClusterConfig) GetNetworkUri() string

func (*GceClusterConfig) GetNodeGroupAffinity

func (x *GceClusterConfig) GetNodeGroupAffinity() *NodeGroupAffinity

func (*GceClusterConfig) GetPrivateIpv6GoogleAccess

func (x *GceClusterConfig) GetPrivateIpv6GoogleAccess() GceClusterConfig_PrivateIpv6GoogleAccess

func (*GceClusterConfig) GetReservationAffinity

func (x *GceClusterConfig) GetReservationAffinity() *ReservationAffinity

func (*GceClusterConfig) GetServiceAccount

func (x *GceClusterConfig) GetServiceAccount() string

func (*GceClusterConfig) GetServiceAccountScopes

func (x *GceClusterConfig) GetServiceAccountScopes() []string

func (*GceClusterConfig) GetShieldedInstanceConfig

func (x *GceClusterConfig) GetShieldedInstanceConfig() *ShieldedInstanceConfig

func (*GceClusterConfig) GetSubnetworkUri

func (x *GceClusterConfig) GetSubnetworkUri() string

func (*GceClusterConfig) GetTags

func (x *GceClusterConfig) GetTags() []string

func (*GceClusterConfig) GetZoneUri

func (x *GceClusterConfig) GetZoneUri() string

func (*GceClusterConfig) ProtoMessage

func (*GceClusterConfig) ProtoMessage()

func (*GceClusterConfig) ProtoReflect

func (x *GceClusterConfig) ProtoReflect() protoreflect.Message

func (*GceClusterConfig) Reset

func (x *GceClusterConfig) Reset()

func (*GceClusterConfig) String

func (x *GceClusterConfig) String() string

GceClusterConfig_PrivateIpv6GoogleAccess

type GceClusterConfig_PrivateIpv6GoogleAccess int32

PrivateIpv6GoogleAccess controls whether and how Dataproc cluster nodes can communicate with Google Services through gRPC over IPv6. These values are directly mapped to corresponding values in the Compute Engine Instance fields.

GceClusterConfig_PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, GceClusterConfig_INHERIT_FROM_SUBNETWORK, GceClusterConfig_OUTBOUND, GceClusterConfig_BIDIRECTIONAL

const (
	// If unspecified, Compute Engine default behavior will apply, which
	// is the same as
	// [INHERIT_FROM_SUBNETWORK][google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess.INHERIT_FROM_SUBNETWORK].
	GceClusterConfig_PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED GceClusterConfig_PrivateIpv6GoogleAccess = 0
	// Private access to and from Google Services configuration
	// inherited from the subnetwork configuration. This is the
	// default Compute Engine behavior.
	GceClusterConfig_INHERIT_FROM_SUBNETWORK GceClusterConfig_PrivateIpv6GoogleAccess = 1
	// Enables outbound private IPv6 access to Google Services from the Dataproc
	// cluster.
	GceClusterConfig_OUTBOUND GceClusterConfig_PrivateIpv6GoogleAccess = 2
	// Enables bidirectional private IPv6 access between Google Services and the
	// Dataproc cluster.
	GceClusterConfig_BIDIRECTIONAL GceClusterConfig_PrivateIpv6GoogleAccess = 3
)

func (GceClusterConfig_PrivateIpv6GoogleAccess) Descriptor

func (GceClusterConfig_PrivateIpv6GoogleAccess) Enum

func (GceClusterConfig_PrivateIpv6GoogleAccess) EnumDescriptor

func (GceClusterConfig_PrivateIpv6GoogleAccess) EnumDescriptor() ([]byte, []int)

Deprecated: Use GceClusterConfig_PrivateIpv6GoogleAccess.Descriptor instead.

func (GceClusterConfig_PrivateIpv6GoogleAccess) Number

func (GceClusterConfig_PrivateIpv6GoogleAccess) String

func (GceClusterConfig_PrivateIpv6GoogleAccess) Type

GetAutoscalingPolicyRequest

type GetAutoscalingPolicyRequest struct {

	// Required. The "resource name" of the autoscaling policy, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	//   - For `projects.regions.autoscalingPolicies.get`, the resource name
	//     of the policy has the following format:
	//     `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`
	//
	//   - For `projects.locations.autoscalingPolicies.get`, the resource name
	//     of the policy has the following format:
	//     `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// contains filtered or unexported fields
}

A request to fetch an autoscaling policy.

func (*GetAutoscalingPolicyRequest) Descriptor

func (*GetAutoscalingPolicyRequest) Descriptor() ([]byte, []int)

Deprecated: Use GetAutoscalingPolicyRequest.ProtoReflect.Descriptor instead.

func (*GetAutoscalingPolicyRequest) GetName

func (x *GetAutoscalingPolicyRequest) GetName() string

func (*GetAutoscalingPolicyRequest) ProtoMessage

func (*GetAutoscalingPolicyRequest) ProtoMessage()

func (*GetAutoscalingPolicyRequest) ProtoReflect

func (*GetAutoscalingPolicyRequest) Reset

func (x *GetAutoscalingPolicyRequest) Reset()

func (*GetAutoscalingPolicyRequest) String

func (x *GetAutoscalingPolicyRequest) String() string

GetBatchRequest

type GetBatchRequest struct {

	// Required. The name of the batch to retrieve.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// contains filtered or unexported fields
}

A request to get the resource representation for a batch workload.

func (*GetBatchRequest) Descriptor

func (*GetBatchRequest) Descriptor() ([]byte, []int)

Deprecated: Use GetBatchRequest.ProtoReflect.Descriptor instead.

func (*GetBatchRequest) GetName

func (x *GetBatchRequest) GetName() string

func (*GetBatchRequest) ProtoMessage

func (*GetBatchRequest) ProtoMessage()

func (*GetBatchRequest) ProtoReflect

func (x *GetBatchRequest) ProtoReflect() protoreflect.Message

func (*GetBatchRequest) Reset

func (x *GetBatchRequest) Reset()

func (*GetBatchRequest) String

func (x *GetBatchRequest) String() string

GetClusterRequest

type GetClusterRequest struct {

	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// contains filtered or unexported fields
}

Request to get the resource representation for a cluster in a project.
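Unlike the create and delete calls, GetCluster in the companion cloud.google.com/go/dataproc/apiv1 client is a plain unary call; a hedged sketch with placeholder identifiers:

package main

import (
	"context"
	"log"

	dataproc "cloud.google.com/go/dataproc/apiv1"
	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	client, err := dataproc.NewClusterControllerClient(ctx,
		option.WithEndpoint("us-central1-dataproc.googleapis.com:443"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	cluster, err := client.GetCluster(ctx, &dataprocpb.GetClusterRequest{
		ProjectId:   "my-project", // hypothetical identifiers
		Region:      "us-central1",
		ClusterName: "example-cluster",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s is %s", cluster.GetClusterName(), cluster.GetStatus().GetState())
}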

func (*GetClusterRequest) Descriptor

func (*GetClusterRequest) Descriptor() ([]byte, []int)

Deprecated: Use GetClusterRequest.ProtoReflect.Descriptor instead.

func (*GetClusterRequest) GetClusterName

func (x *GetClusterRequest) GetClusterName() string

func (*GetClusterRequest) GetProjectId

func (x *GetClusterRequest) GetProjectId() string

func (*GetClusterRequest) GetRegion

func (x *GetClusterRequest) GetRegion() string

func (*GetClusterRequest) ProtoMessage

func (*GetClusterRequest) ProtoMessage()

func (*GetClusterRequest) ProtoReflect

func (x *GetClusterRequest) ProtoReflect() protoreflect.Message

func (*GetClusterRequest) Reset

func (x *GetClusterRequest) Reset()

func (*GetClusterRequest) String

func (x *GetClusterRequest) String() string

GetJobRequest

type GetJobRequest struct {

	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// contains filtered or unexported fields
}

A request to get the resource representation for a job in a project.

func (*GetJobRequest) Descriptor

func (*GetJobRequest) Descriptor() ([]byte, []int)

Deprecated: Use GetJobRequest.ProtoReflect.Descriptor instead.

func (*GetJobRequest) GetJobId

func (x *GetJobRequest) GetJobId() string

func (*GetJobRequest) GetProjectId

func (x *GetJobRequest) GetProjectId() string

func (*GetJobRequest) GetRegion

func (x *GetJobRequest) GetRegion() string

func (*GetJobRequest) ProtoMessage

func (*GetJobRequest) ProtoMessage()

func (*GetJobRequest) ProtoReflect

func (x *GetJobRequest) ProtoReflect() protoreflect.Message

func (*GetJobRequest) Reset

func (x *GetJobRequest) Reset()

func (*GetJobRequest) String

func (x *GetJobRequest) String() string

GetNodeGroupRequest

type GetNodeGroupRequest struct {

	// Required. The name of the node group to retrieve.
	// Format:
	// `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// contains filtered or unexported fields
}

A request to get a node group.

func (*GetNodeGroupRequest) Descriptor

func (*GetNodeGroupRequest) Descriptor() ([]byte, []int)

Deprecated: Use GetNodeGroupRequest.ProtoReflect.Descriptor instead.

func (*GetNodeGroupRequest) GetName

func (x *GetNodeGroupRequest) GetName() string

func (*GetNodeGroupRequest) ProtoMessage

func (*GetNodeGroupRequest) ProtoMessage()

func (*GetNodeGroupRequest) ProtoReflect

func (x *GetNodeGroupRequest) ProtoReflect() protoreflect.Message

func (*GetNodeGroupRequest) Reset

func (x *GetNodeGroupRequest) Reset()

func (*GetNodeGroupRequest) String

func (x *GetNodeGroupRequest) String() string

GetWorkflowTemplateRequest

type GetWorkflowTemplateRequest struct {

	// Required. The resource name of the workflow template, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	//   - For `projects.regions.workflowTemplates.get`, the resource name of the
	//     template has the following format:
	//     `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	//
	//   - For `projects.locations.workflowTemplates.get`, the resource name of the
	//     template has the following format:
	//     `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Optional. The version of workflow template to retrieve. Only previously
	// instantiated versions can be retrieved.
	//
	// If unspecified, retrieves the current version.
	Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	// contains filtered or unexported fields
}

A request to fetch a workflow template.

func (*GetWorkflowTemplateRequest) Descriptor

func (*GetWorkflowTemplateRequest) Descriptor() ([]byte, []int)

Deprecated: Use GetWorkflowTemplateRequest.ProtoReflect.Descriptor instead.

func (*GetWorkflowTemplateRequest) GetName

func (x *GetWorkflowTemplateRequest) GetName() string

func (*GetWorkflowTemplateRequest) GetVersion

func (x *GetWorkflowTemplateRequest) GetVersion() int32

func (*GetWorkflowTemplateRequest) ProtoMessage

func (*GetWorkflowTemplateRequest) ProtoMessage()

func (*GetWorkflowTemplateRequest) ProtoReflect

func (*GetWorkflowTemplateRequest) Reset

func (x *GetWorkflowTemplateRequest) Reset()

func (*GetWorkflowTemplateRequest) String

func (x *GetWorkflowTemplateRequest) String() string

GkeClusterConfig

type GkeClusterConfig struct {

	// Optional. A target GKE cluster to deploy to. It must be in the same project and
	// region as the Dataproc cluster (the GKE cluster can be zonal or regional).
	// Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
	GkeClusterTarget string `protobuf:"bytes,2,opt,name=gke_cluster_target,json=gkeClusterTarget,proto3" json:"gke_cluster_target,omitempty"`
	// Optional. GKE NodePools where workloads will be scheduled. At least one node pool
	// must be assigned the 'default' role. Each role can be given to only a
	// single NodePoolTarget. All NodePools must have the same location settings.
	// If a nodePoolTarget is not specified, Dataproc constructs a default
	// nodePoolTarget.
	NodePoolTarget []*GkeNodePoolTarget `protobuf:"bytes,3,rep,name=node_pool_target,json=nodePoolTarget,proto3" json:"node_pool_target,omitempty"`
	// contains filtered or unexported fields
}

The cluster's GKE config.
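
For illustration, a hedged sketch of a config that targets an existing GKE cluster and assigns the required 'default' role to one node pool (the resource names are placeholders):

gkeCfg := &dataprocpb.GkeClusterConfig{
	// Placeholder GKE cluster in the same project and region as the Dataproc cluster.
	GkeClusterTarget: "projects/my-project/locations/us-central1/clusters/my-gke-cluster",
	NodePoolTarget: []*dataprocpb.GkeNodePoolTarget{
		{
			NodePool: "projects/my-project/locations/us-central1/clusters/my-gke-cluster/nodePools/dp-default",
			Roles:    []dataprocpb.GkeNodePoolTarget_Role{dataprocpb.GkeNodePoolTarget_DEFAULT},
		},
	},
}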

func (*GkeClusterConfig) Descriptor

func (*GkeClusterConfig) Descriptor() ([]byte, []int)

Deprecated: Use GkeClusterConfig.ProtoReflect.Descriptor instead.

func (*GkeClusterConfig) GetGkeClusterTarget

func (x *GkeClusterConfig) GetGkeClusterTarget() string

func (*GkeClusterConfig) GetNodePoolTarget

func (x *GkeClusterConfig) GetNodePoolTarget() []*GkeNodePoolTarget

func (*GkeClusterConfig) ProtoMessage

func (*GkeClusterConfig) ProtoMessage()

func (*GkeClusterConfig) ProtoReflect

func (x *GkeClusterConfig) ProtoReflect() protoreflect.Message

func (*GkeClusterConfig) Reset

func (x *GkeClusterConfig) Reset()

func (*GkeClusterConfig) String

func (x *GkeClusterConfig) String() string

GkeNodePoolConfig

type GkeNodePoolConfig struct {

	// Optional. The node pool configuration.
	Config *GkeNodePoolConfig_GkeNodeConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
	// Optional. The list of Compute Engine
	// [zones](https://cloud.google.com/compute/docs/zones#available) where
	// NodePool's nodes will be located.
	//
	// **Note:** Currently, only one zone may be specified.
	//
	// If a location is not specified during NodePool creation, Dataproc will
	// choose a location.
	Locations []string `protobuf:"bytes,13,rep,name=locations,proto3" json:"locations,omitempty"`
	// Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
	// only when a valid configuration is present.
	Autoscaling *GkeNodePoolConfig_GkeNodePoolAutoscalingConfig `protobuf:"bytes,4,opt,name=autoscaling,proto3" json:"autoscaling,omitempty"`
	// contains filtered or unexported fields
}

The configuration of a GKE NodePool used by a Dataproc-on-GKE cluster.
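
A sketch combining the three optional pieces (machine shape, a single zone, and autoscaling bounds); the values are illustrative only:

poolCfg := &dataprocpb.GkeNodePoolConfig{
	Config: &dataprocpb.GkeNodePoolConfig_GkeNodeConfig{
		MachineType: "n1-standard-4", // placeholder machine type
		Preemptible: false,
	},
	// Currently only one zone may be specified.
	Locations: []string{"us-central1-a"},
	Autoscaling: &dataprocpb.GkeNodePoolConfig_GkeNodePoolAutoscalingConfig{
		MinNodeCount: 1,
		MaxNodeCount: 5, // quota must be sufficient to scale up
	},
}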

func (*GkeNodePoolConfig) Descriptor

func (*GkeNodePoolConfig) Descriptor() ([]byte, []int)

Deprecated: Use GkeNodePoolConfig.ProtoReflect.Descriptor instead.

func (*GkeNodePoolConfig) GetAutoscaling

func (*GkeNodePoolConfig) GetConfig

func (*GkeNodePoolConfig) GetLocations

func (x *GkeNodePoolConfig) GetLocations() []string

func (*GkeNodePoolConfig) ProtoMessage

func (*GkeNodePoolConfig) ProtoMessage()

func (*GkeNodePoolConfig) ProtoReflect

func (x *GkeNodePoolConfig) ProtoReflect() protoreflect.Message

func (*GkeNodePoolConfig) Reset

func (x *GkeNodePoolConfig) Reset()

func (*GkeNodePoolConfig) String

func (x *GkeNodePoolConfig) String() string

GkeNodePoolConfig_GkeNodeConfig

type GkeNodePoolConfig_GkeNodeConfig struct {

	// Optional. The name of a Compute Engine [machine
	// type](https://cloud.google.com/compute/docs/machine-types).
	MachineType string `protobuf:"bytes,1,opt,name=machine_type,json=machineType,proto3" json:"machine_type,omitempty"`
	// Optional. Whether the nodes are created as [preemptible VM
	// instances](https://cloud.google.com/compute/docs/instances/preemptible).
	Preemptible bool `protobuf:"varint,10,opt,name=preemptible,proto3" json:"preemptible,omitempty"`
	// Optional. The number of local SSD disks to attach to the node, which is limited by
	// the maximum number of disks allowable per zone (see [Adding Local
	// SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).
	LocalSsdCount int32 `protobuf:"varint,7,opt,name=local_ssd_count,json=localSsdCount,proto3" json:"local_ssd_count,omitempty"`
	// Optional. A list of [hardware
	// accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
	// each node.
	Accelerators []*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig `protobuf:"bytes,11,rep,name=accelerators,proto3" json:"accelerators,omitempty"`
	// Optional. [Minimum CPU
	// platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// to be used by this instance. The instance may be scheduled on the
	// specified or a newer CPU platform. Specify the friendly names of CPU
	// platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
	MinCpuPlatform string `protobuf:"bytes,13,opt,name=min_cpu_platform,json=minCpuPlatform,proto3" json:"min_cpu_platform,omitempty"`
	// contains filtered or unexported fields
}

Parameters that describe cluster nodes.

func (*GkeNodePoolConfig_GkeNodeConfig) Descriptor

func (*GkeNodePoolConfig_GkeNodeConfig) Descriptor() ([]byte, []int)

Deprecated: Use GkeNodePoolConfig_GkeNodeConfig.ProtoReflect.Descriptor instead.

func (*GkeNodePoolConfig_GkeNodeConfig) GetAccelerators

func (*GkeNodePoolConfig_GkeNodeConfig) GetLocalSsdCount

func (x *GkeNodePoolConfig_GkeNodeConfig) GetLocalSsdCount() int32

func (*GkeNodePoolConfig_GkeNodeConfig) GetMachineType

func (x *GkeNodePoolConfig_GkeNodeConfig) GetMachineType() string

func (*GkeNodePoolConfig_GkeNodeConfig) GetMinCpuPlatform

func (x *GkeNodePoolConfig_GkeNodeConfig) GetMinCpuPlatform() string

func (*GkeNodePoolConfig_GkeNodeConfig) GetPreemptible

func (x *GkeNodePoolConfig_GkeNodeConfig) GetPreemptible() bool

func (*GkeNodePoolConfig_GkeNodeConfig) ProtoMessage

func (*GkeNodePoolConfig_GkeNodeConfig) ProtoMessage()

func (*GkeNodePoolConfig_GkeNodeConfig) ProtoReflect

func (*GkeNodePoolConfig_GkeNodeConfig) Reset

func (*GkeNodePoolConfig_GkeNodeConfig) String

GkeNodePoolConfig_GkeNodePoolAcceleratorConfig

type GkeNodePoolConfig_GkeNodePoolAcceleratorConfig struct {

	// The number of accelerator cards exposed to an instance.
	AcceleratorCount int64 `protobuf:"varint,1,opt,name=accelerator_count,json=acceleratorCount,proto3" json:"accelerator_count,omitempty"`
	// The accelerator type resource name (see GPUs on Compute Engine).
	AcceleratorType string `protobuf:"bytes,2,opt,name=accelerator_type,json=acceleratorType,proto3" json:"accelerator_type,omitempty"`
	// contains filtered or unexported fields
}

A GkeNodePoolAcceleratorConfig represents a hardware accelerator request for a NodePool.

func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) Descriptor

Deprecated: Use GkeNodePoolConfig_GkeNodePoolAcceleratorConfig.ProtoReflect.Descriptor instead.

func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) GetAcceleratorCount

func (x *GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) GetAcceleratorCount() int64

func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) GetAcceleratorType

func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) ProtoMessage

func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) ProtoReflect

func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) Reset

func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) String

GkeNodePoolConfig_GkeNodePoolAutoscalingConfig

type GkeNodePoolConfig_GkeNodePoolAutoscalingConfig struct {

	// The minimum number of nodes in the NodePool. Must be >= 0 and <=
	// max_node_count.
	MinNodeCount int32 `protobuf:"varint,2,opt,name=min_node_count,json=minNodeCount,proto3" json:"min_node_count,omitempty"`
	// The maximum number of nodes in the NodePool. Must be >= min_node_count.
	// **Note:** Quota must be sufficient to scale up the cluster.
	MaxNodeCount int32 `protobuf:"varint,3,opt,name=max_node_count,json=maxNodeCount,proto3" json:"max_node_count,omitempty"`
	// contains filtered or unexported fields
}

GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage.

func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) Descriptor

Deprecated: Use GkeNodePoolConfig_GkeNodePoolAutoscalingConfig.ProtoReflect.Descriptor instead.

func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) GetMaxNodeCount

func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) GetMinNodeCount

func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) ProtoMessage

func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) ProtoReflect

func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) Reset

func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) String

GkeNodePoolTarget

type GkeNodePoolTarget struct {

	// Required. The target GKE NodePool.
	// Format:
	// 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
	NodePool string `protobuf:"bytes,1,opt,name=node_pool,json=nodePool,proto3" json:"node_pool,omitempty"`
	// Required. The types of roles for a GKE NodePool.
	Roles []GkeNodePoolTarget_Role `protobuf:"varint,2,rep,packed,name=roles,proto3,enum=google.cloud.dataproc.v1.GkeNodePoolTarget_Role" json:"roles,omitempty"`
	// Optional. The configuration for the GKE NodePool.
	//
	// If specified, Dataproc attempts to create a NodePool with the
	// specified shape. If one with the same name already exists, it is
	// verified against all specified fields. If a field differs, the
	// virtual cluster creation will fail.
	//
	// If omitted, any NodePool with the specified name is used. If a
	// NodePool with the specified name does not exist, Dataproc creates a NodePool
	// with default values.
	NodePoolConfig *GkeNodePoolConfig `protobuf:"bytes,3,opt,name=node_pool_config,json=nodePoolConfig,proto3" json:"node_pool_config,omitempty"`
	// contains filtered or unexported fields
}

GKE NodePools that Dataproc workloads run on.

func (*GkeNodePoolTarget) Descriptor

func (*GkeNodePoolTarget) Descriptor() ([]byte, []int)

Deprecated: Use GkeNodePoolTarget.ProtoReflect.Descriptor instead.

func (*GkeNodePoolTarget) GetNodePool

func (x *GkeNodePoolTarget) GetNodePool() string

func (*GkeNodePoolTarget) GetNodePoolConfig

func (x *GkeNodePoolTarget) GetNodePoolConfig() *GkeNodePoolConfig

func (*GkeNodePoolTarget) GetRoles

func (x *GkeNodePoolTarget) GetRoles() []GkeNodePoolTarget_Role

func (*GkeNodePoolTarget) ProtoMessage

func (*GkeNodePoolTarget) ProtoMessage()

func (*GkeNodePoolTarget) ProtoReflect

func (x *GkeNodePoolTarget) ProtoReflect() protoreflect.Message

func (*GkeNodePoolTarget) Reset

func (x *GkeNodePoolTarget) Reset()

func (*GkeNodePoolTarget) String

func (x *GkeNodePoolTarget) String() string

GkeNodePoolTarget_Role

type GkeNodePoolTarget_Role int32

Role specifies the tasks that will run on the NodePool. The roles can be specific to workloads. Exactly one GkeNodePoolTarget within the VirtualCluster must have the 'default' role, which is used to run all workloads that are not associated with a NodePool.

GkeNodePoolTarget_ROLE_UNSPECIFIED, GkeNodePoolTarget_DEFAULT, GkeNodePoolTarget_CONTROLLER, GkeNodePoolTarget_SPARK_DRIVER, GkeNodePoolTarget_SPARK_EXECUTOR

const (
	// Role is unspecified.
	GkeNodePoolTarget_ROLE_UNSPECIFIED GkeNodePoolTarget_Role = 0
	// Any roles that are not directly assigned to a NodePool run on the
	// `default` role's NodePool.
	GkeNodePoolTarget_DEFAULT GkeNodePoolTarget_Role = 1
	// Run controllers and webhooks.
	GkeNodePoolTarget_CONTROLLER GkeNodePoolTarget_Role = 2
	// Run spark driver.
	GkeNodePoolTarget_SPARK_DRIVER GkeNodePoolTarget_Role = 3
	// Run spark executors.
	GkeNodePoolTarget_SPARK_EXECUTOR GkeNodePoolTarget_Role = 4
)

func (GkeNodePoolTarget_Role) Descriptor

func (GkeNodePoolTarget_Role) Enum

func (GkeNodePoolTarget_Role) EnumDescriptor

func (GkeNodePoolTarget_Role) EnumDescriptor() ([]byte, []int)

Deprecated: Use GkeNodePoolTarget_Role.Descriptor instead.

func (GkeNodePoolTarget_Role) Number

func (GkeNodePoolTarget_Role) String

func (x GkeNodePoolTarget_Role) String() string

func (GkeNodePoolTarget_Role) Type

HadoopJob

type HadoopJob struct {
	Driver isHadoopJob_Driver `protobuf_oneof:"driver"`

	Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`

	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`

	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`

	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`

	Properties map[string]string "" /* 161 byte string literal not displayed */

	LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`

}

A Dataproc job for running Apache Hadoop MapReduce jobs on Apache Hadoop YARN.
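
The driver is a oneof and is populated through one of the wrapper types listed below (HadoopJob_MainJarFileUri or HadoopJob_MainClass). A small sketch with placeholder URIs:

hadoop := &dataprocpb.HadoopJob{
	Driver: &dataprocpb.HadoopJob_MainJarFileUri{
		MainJarFileUri: "gs://my-bucket/wordcount.jar",
	},
	Args: []string{"gs://my-bucket/input/", "gs://my-bucket/output/"},
}
// hadoop.GetMainJarFileUri() returns the URI; hadoop.GetMainClass() returns "".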

func (*HadoopJob) Descriptor

func (*HadoopJob) Descriptor() ([]byte, []int)

Deprecated: Use HadoopJob.ProtoReflect.Descriptor instead.

func (*HadoopJob) GetArchiveUris

func (x *HadoopJob) GetArchiveUris() []string

func (*HadoopJob) GetArgs

func (x *HadoopJob) GetArgs() []string

func (*HadoopJob) GetDriver

func (m *HadoopJob) GetDriver() isHadoopJob_Driver

func (*HadoopJob) GetFileUris

func (x *HadoopJob) GetFileUris() []string

func (*HadoopJob) GetJarFileUris

func (x *HadoopJob) GetJarFileUris() []string

func (*HadoopJob) GetLoggingConfig

func (x *HadoopJob) GetLoggingConfig() *LoggingConfig

func (*HadoopJob) GetMainClass

func (x *HadoopJob) GetMainClass() string

func (*HadoopJob) GetMainJarFileUri

func (x *HadoopJob) GetMainJarFileUri() string

func (*HadoopJob) GetProperties

func (x *HadoopJob) GetProperties() map[string]string

func (*HadoopJob) ProtoMessage

func (*HadoopJob) ProtoMessage()

func (*HadoopJob) ProtoReflect

func (x *HadoopJob) ProtoReflect() protoreflect.Message

func (*HadoopJob) Reset

func (x *HadoopJob) Reset()

func (*HadoopJob) String

func (x *HadoopJob) String() string

HadoopJob_MainClass

type HadoopJob_MainClass struct {
	// The name of the driver's main class. The jar file containing the class
	// must be in the default CLASSPATH or specified in `jar_file_uris`.
	MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}

HadoopJob_MainJarFileUri

type HadoopJob_MainJarFileUri struct {
	// The HCFS URI of the jar file containing the main class.
	// Examples:
	//
	//	'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
	//	'hdfs:/tmp/test-samples/custom-wordcount.jar'
	//	'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
	MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}

HiveJob

type HiveJob struct {
	Queries isHiveJob_Queries `protobuf_oneof:"queries"`

	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`

	ScriptVariables map[string]string "" /* 194 byte string literal not displayed */

	Properties map[string]string "" /* 161 byte string literal not displayed */

	JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`

}

A Dataproc job for running Apache Hive queries on YARN.
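
A sketch that supplies the queries inline through the HiveJob_QueryList wrapper; this assumes QueryList carries its queries as a []string, and the query text is a placeholder:

hive := &dataprocpb.HiveJob{
	Queries: &dataprocpb.HiveJob_QueryList{
		QueryList: &dataprocpb.QueryList{
			Queries: []string{"SHOW DATABASES;"},
		},
	},
	ContinueOnFailure: true,
	ScriptVariables:   map[string]string{"env": "dev"},
}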

func (*HiveJob) Descriptor

func (*HiveJob) Descriptor() ([]byte, []int)

Deprecated: Use HiveJob.ProtoReflect.Descriptor instead.

func (*HiveJob) GetContinueOnFailure

func (x *HiveJob) GetContinueOnFailure() bool

func (*HiveJob) GetJarFileUris

func (x *HiveJob) GetJarFileUris() []string

func (*HiveJob) GetProperties

func (x *HiveJob) GetProperties() map[string]string

func (*HiveJob) GetQueries

func (m *HiveJob) GetQueries() isHiveJob_Queries

func (*HiveJob) GetQueryFileUri

func (x *HiveJob) GetQueryFileUri() string

func (*HiveJob) GetQueryList

func (x *HiveJob) GetQueryList() *QueryList

func (*HiveJob) GetScriptVariables

func (x *HiveJob) GetScriptVariables() map[string]string

func (*HiveJob) ProtoMessage

func (*HiveJob) ProtoMessage()

func (*HiveJob) ProtoReflect

func (x *HiveJob) ProtoReflect() protoreflect.Message

func (*HiveJob) Reset

func (x *HiveJob) Reset()

func (*HiveJob) String

func (x *HiveJob) String() string

HiveJob_QueryFileUri

type HiveJob_QueryFileUri struct {
	// The HCFS URI of the script that contains Hive queries.
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

HiveJob_QueryList

type HiveJob_QueryList struct {
	// A list of queries.
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

IdentityConfig

type IdentityConfig struct {
	UserServiceAccountMapping map[string]string "" /* 228 byte string literal not displayed */

}

Identity related configuration, including service account based secure multi-tenancy user mappings.

func (*IdentityConfig) Descriptor

func (*IdentityConfig) Descriptor() ([]byte, []int)

Deprecated: Use IdentityConfig.ProtoReflect.Descriptor instead.

func (*IdentityConfig) GetUserServiceAccountMapping

func (x *IdentityConfig) GetUserServiceAccountMapping() map[string]string

func (*IdentityConfig) ProtoMessage

func (*IdentityConfig) ProtoMessage()

func (*IdentityConfig) ProtoReflect

func (x *IdentityConfig) ProtoReflect() protoreflect.Message

func (*IdentityConfig) Reset

func (x *IdentityConfig) Reset()

func (*IdentityConfig) String

func (x *IdentityConfig) String() string

InstanceGroupAutoscalingPolicyConfig

type InstanceGroupAutoscalingPolicyConfig struct {

	// Optional. Minimum number of instances for this group.
	//
	// Primary workers - Bounds: [2, max_instances]. Default: 2.
	// Secondary workers - Bounds: [0, max_instances]. Default: 0.
	MinInstances int32 `protobuf:"varint,1,opt,name=min_instances,json=minInstances,proto3" json:"min_instances,omitempty"`
	// Required. Maximum number of instances for this group. Required for primary
	// workers. Note that by default, clusters will not use secondary workers.
	// Required for secondary workers if the minimum secondary instances is set.
	//
	// Primary workers - Bounds: [min_instances, ).
	// Secondary workers - Bounds: [min_instances, ). Default: 0.
	MaxInstances int32 `protobuf:"varint,2,opt,name=max_instances,json=maxInstances,proto3" json:"max_instances,omitempty"`
	// Optional. Weight for the instance group, which is used to determine the
	// fraction of total workers in the cluster from this instance group.
	// For example, if primary workers have weight 2, and secondary workers have
	// weight 1, the cluster will have approximately 2 primary workers for each
	// secondary worker.
	//
	// The cluster may not reach the specified balance if constrained
	// by min/max bounds or other autoscaling settings. For example, if
	// `max_instances` for secondary workers is 0, then only primary workers will
	// be added. The cluster can also be out of balance when created.
	//
	// If weight is not set on any instance group, the cluster will default to
	// equal weight for all groups: the cluster will attempt to maintain an equal
	// number of workers in each group within the configured size bounds for each
	// group. If weight is set for one group only, the cluster will default to
	// zero weight on the unset group. For example if weight is set only on
	// primary workers, the cluster will use primary workers only and no
	// secondary workers.
	Weight int32 `protobuf:"varint,3,opt,name=weight,proto3" json:"weight,omitempty"`
	// contains filtered or unexported fields
}

Configuration for the size bounds of an instance group, including its proportional size to other groups.
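
To make the weight semantics concrete, a sketch in which the cluster is asked to keep roughly two primary workers per secondary worker (the bounds are placeholders):

primary := &dataprocpb.InstanceGroupAutoscalingPolicyConfig{
	MinInstances: 2,
	MaxInstances: 10,
	Weight:       2, // ~2 primary workers for each secondary worker
}
secondary := &dataprocpb.InstanceGroupAutoscalingPolicyConfig{
	MinInstances: 0,
	MaxInstances: 20,
	Weight:       1,
}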

func (*InstanceGroupAutoscalingPolicyConfig) Descriptor

func (*InstanceGroupAutoscalingPolicyConfig) Descriptor() ([]byte, []int)

Deprecated: Use InstanceGroupAutoscalingPolicyConfig.ProtoReflect.Descriptor instead.

func (*InstanceGroupAutoscalingPolicyConfig) GetMaxInstances

func (x *InstanceGroupAutoscalingPolicyConfig) GetMaxInstances() int32

func (*InstanceGroupAutoscalingPolicyConfig) GetMinInstances

func (x *InstanceGroupAutoscalingPolicyConfig) GetMinInstances() int32

func (*InstanceGroupAutoscalingPolicyConfig) GetWeight

func (*InstanceGroupAutoscalingPolicyConfig) ProtoMessage

func (*InstanceGroupAutoscalingPolicyConfig) ProtoMessage()

func (*InstanceGroupAutoscalingPolicyConfig) ProtoReflect

func (*InstanceGroupAutoscalingPolicyConfig) Reset

func (*InstanceGroupAutoscalingPolicyConfig) String

InstanceGroupConfig

type InstanceGroupConfig struct {
	NumInstances int32 `protobuf:"varint,1,opt,name=num_instances,json=numInstances,proto3" json:"num_instances,omitempty"`

	InstanceNames []string `protobuf:"bytes,2,rep,name=instance_names,json=instanceNames,proto3" json:"instance_names,omitempty"`

	ImageUri string `protobuf:"bytes,3,opt,name=image_uri,json=imageUri,proto3" json:"image_uri,omitempty"`

	MachineTypeUri string `protobuf:"bytes,4,opt,name=machine_type_uri,json=machineTypeUri,proto3" json:"machine_type_uri,omitempty"`

	DiskConfig *DiskConfig `protobuf:"bytes,5,opt,name=disk_config,json=diskConfig,proto3" json:"disk_config,omitempty"`

	IsPreemptible bool `protobuf:"varint,6,opt,name=is_preemptible,json=isPreemptible,proto3" json:"is_preemptible,omitempty"`

	Preemptibility InstanceGroupConfig_Preemptibility "" /* 148 byte string literal not displayed */

	ManagedGroupConfig *ManagedGroupConfig `protobuf:"bytes,7,opt,name=managed_group_config,json=managedGroupConfig,proto3" json:"managed_group_config,omitempty"`

	Accelerators []*AcceleratorConfig `protobuf:"bytes,8,rep,name=accelerators,proto3" json:"accelerators,omitempty"`

	MinCpuPlatform string `protobuf:"bytes,9,opt,name=min_cpu_platform,json=minCpuPlatform,proto3" json:"min_cpu_platform,omitempty"`

}

The config settings for Compute Engine resources in an instance group, such as a master or worker group.

func (*InstanceGroupConfig) Descriptor

func (*InstanceGroupConfig) Descriptor() ([]byte, []int)

Deprecated: Use InstanceGroupConfig.ProtoReflect.Descriptor instead.

func (*InstanceGroupConfig) GetAccelerators

func (x *InstanceGroupConfig) GetAccelerators() []*AcceleratorConfig

func (*InstanceGroupConfig) GetDiskConfig

func (x *InstanceGroupConfig) GetDiskConfig() *DiskConfig

func (*InstanceGroupConfig) GetImageUri

func (x *InstanceGroupConfig) GetImageUri() string

func (*InstanceGroupConfig) GetInstanceNames

func (x *InstanceGroupConfig) GetInstanceNames() []string

func (*InstanceGroupConfig) GetIsPreemptible

func (x *InstanceGroupConfig) GetIsPreemptible() bool

func (*InstanceGroupConfig) GetMachineTypeUri

func (x *InstanceGroupConfig) GetMachineTypeUri() string

func (*InstanceGroupConfig) GetManagedGroupConfig

func (x *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig

func (*InstanceGroupConfig) GetMinCpuPlatform

func (x *InstanceGroupConfig) GetMinCpuPlatform() string

func (*InstanceGroupConfig) GetNumInstances

func (x *InstanceGroupConfig) GetNumInstances() int32

func (*InstanceGroupConfig) GetPreemptibility

func (*InstanceGroupConfig) ProtoMessage

func (*InstanceGroupConfig) ProtoMessage()

func (*InstanceGroupConfig) ProtoReflect

func (x *InstanceGroupConfig) ProtoReflect() protoreflect.Message

func (*InstanceGroupConfig) Reset

func (x *InstanceGroupConfig) Reset()

func (*InstanceGroupConfig) String

func (x *InstanceGroupConfig) String() string

InstanceGroupConfig_Preemptibility

type InstanceGroupConfig_Preemptibility int32

Controls the use of preemptible instances within the group.

InstanceGroupConfig_PREEMPTIBILITY_UNSPECIFIED, InstanceGroupConfig_NON_PREEMPTIBLE, InstanceGroupConfig_PREEMPTIBLE

const (
	// Preemptibility is unspecified, the system will choose the
	// appropriate setting for each instance group.
	InstanceGroupConfig_PREEMPTIBILITY_UNSPECIFIED InstanceGroupConfig_Preemptibility = 0
	// Instances are non-preemptible.
	//
	// This option is allowed for all instance groups and is the only valid
	// value for Master and Worker instance groups.
	InstanceGroupConfig_NON_PREEMPTIBLE InstanceGroupConfig_Preemptibility = 1
	// Instances are [preemptible]
	// (https://cloud.google.com/compute/docs/instances/preemptible).
	//
	// This option is allowed only for [secondary worker]
	// (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms)
	// groups.
	InstanceGroupConfig_PREEMPTIBLE InstanceGroupConfig_Preemptibility = 2
)

func (InstanceGroupConfig_Preemptibility) Descriptor

func (InstanceGroupConfig_Preemptibility) Enum

func (InstanceGroupConfig_Preemptibility) EnumDescriptor

func (InstanceGroupConfig_Preemptibility) EnumDescriptor() ([]byte, []int)

Deprecated: Use InstanceGroupConfig_Preemptibility.Descriptor instead.

func (InstanceGroupConfig_Preemptibility) Number

func (InstanceGroupConfig_Preemptibility) String

func (InstanceGroupConfig_Preemptibility) Type

InstantiateInlineWorkflowTemplateRequest

type InstantiateInlineWorkflowTemplateRequest struct {

	// Required. The resource name of the region or location, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	//   - For `projects.regions.workflowTemplates.instantiateinline`, the resource
	//     name of the region has the following format:
	//     `projects/{project_id}/regions/{region}`
	//
	//   - For `projects.locations.workflowTemplates.instantiateinline`, the
	//     resource name of the location has the following format:
	//     `projects/{project_id}/locations/{location}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The workflow template to instantiate.
	Template *WorkflowTemplate `protobuf:"bytes,2,opt,name=template,proto3" json:"template,omitempty"`
	// Optional. A tag that prevents multiple concurrent workflow
	// instances with the same tag from running. This mitigates risk of
	// concurrent instances started due to retries.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The tag must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// contains filtered or unexported fields
}

A request to instantiate an inline workflow template.
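
A sketch of populating the request; tmpl stands for a previously built *dataprocpb.WorkflowTemplate, and the request ID is generated with github.com/google/uuid purely as one way to follow the UUID recommendation:

req := &dataprocpb.InstantiateInlineWorkflowTemplateRequest{
	Parent:    "projects/my-project/regions/us-central1",
	Template:  tmpl,             // assumed to exist already
	RequestId: uuid.NewString(), // guards against duplicate instantiations on retry
}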

func (*InstantiateInlineWorkflowTemplateRequest) Descriptor

func (*InstantiateInlineWorkflowTemplateRequest) Descriptor() ([]byte, []int)

Deprecated: Use InstantiateInlineWorkflowTemplateRequest.ProtoReflect.Descriptor instead.

func (*InstantiateInlineWorkflowTemplateRequest) GetParent

func (*InstantiateInlineWorkflowTemplateRequest) GetRequestId

func (*InstantiateInlineWorkflowTemplateRequest) GetTemplate

func (*InstantiateInlineWorkflowTemplateRequest) ProtoMessage

func (*InstantiateInlineWorkflowTemplateRequest) ProtoReflect

func (*InstantiateInlineWorkflowTemplateRequest) Reset

func (*InstantiateInlineWorkflowTemplateRequest) String

InstantiateWorkflowTemplateRequest

type InstantiateWorkflowTemplateRequest struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`

	Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`

	RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`

	Parameters map[string]string "" /* 161 byte string literal not displayed */

}

A request to instantiate a workflow template.

func (*InstantiateWorkflowTemplateRequest) Descriptor

func (*InstantiateWorkflowTemplateRequest) Descriptor() ([]byte, []int)

Deprecated: Use InstantiateWorkflowTemplateRequest.ProtoReflect.Descriptor instead.

func (*InstantiateWorkflowTemplateRequest) GetName

func (*InstantiateWorkflowTemplateRequest) GetParameters

func (x *InstantiateWorkflowTemplateRequest) GetParameters() map[string]string

func (*InstantiateWorkflowTemplateRequest) GetRequestId

func (x *InstantiateWorkflowTemplateRequest) GetRequestId() string

func (*InstantiateWorkflowTemplateRequest) GetVersion

func (x *InstantiateWorkflowTemplateRequest) GetVersion() int32

func (*InstantiateWorkflowTemplateRequest) ProtoMessage

func (*InstantiateWorkflowTemplateRequest) ProtoMessage()

func (*InstantiateWorkflowTemplateRequest) ProtoReflect

func (*InstantiateWorkflowTemplateRequest) Reset

func (*InstantiateWorkflowTemplateRequest) String

Job

type Job struct {
	Reference *JobReference `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"`

	Placement *JobPlacement `protobuf:"bytes,2,opt,name=placement,proto3" json:"placement,omitempty"`

	TypeJob isJob_TypeJob `protobuf_oneof:"type_job"`

	Status *JobStatus `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"`

	StatusHistory []*JobStatus `protobuf:"bytes,13,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`

	YarnApplications []*YarnApplication `protobuf:"bytes,9,rep,name=yarn_applications,json=yarnApplications,proto3" json:"yarn_applications,omitempty"`

	DriverOutputResourceUri string "" /* 135 byte string literal not displayed */

	DriverControlFilesUri string "" /* 129 byte string literal not displayed */

	Labels map[string]string "" /* 154 byte string literal not displayed */

	Scheduling *JobScheduling `protobuf:"bytes,20,opt,name=scheduling,proto3" json:"scheduling,omitempty"`

	JobUuid string `protobuf:"bytes,22,opt,name=job_uuid,json=jobUuid,proto3" json:"job_uuid,omitempty"`

	Done bool `protobuf:"varint,24,opt,name=done,proto3" json:"done,omitempty"`

	DriverSchedulingConfig *DriverSchedulingConfig "" /* 130 byte string literal not displayed */

}

A Dataproc job resource.
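
The job type is a oneof (TypeJob). A sketch submitting a Spark job, assuming SparkJob uses the same main-class wrapper pattern as HadoopJob; the cluster name and class are placeholders:

job := &dataprocpb.Job{
	Placement: &dataprocpb.JobPlacement{ClusterName: "example-cluster"},
	TypeJob: &dataprocpb.Job_SparkJob{
		SparkJob: &dataprocpb.SparkJob{
			Driver: &dataprocpb.SparkJob_MainClass{MainClass: "org.apache.spark.examples.SparkPi"},
		},
	},
}
// job.GetSparkJob() returns the SparkJob; the other type accessors return nil.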

func (*Job) Descriptor

func (*Job) Descriptor() ([]byte, []int)

Deprecated: Use Job.ProtoReflect.Descriptor instead.

func (*Job) GetDone

func (x *Job) GetDone() bool

func (*Job) GetDriverControlFilesUri

func (x *Job) GetDriverControlFilesUri() string

func (*Job) GetDriverOutputResourceUri

func (x *Job) GetDriverOutputResourceUri() string

func (*Job) GetDriverSchedulingConfig

func (x *Job) GetDriverSchedulingConfig() *DriverSchedulingConfig

func (*Job) GetHadoopJob

func (x *Job) GetHadoopJob() *HadoopJob

func (*Job) GetHiveJob

func (x *Job) GetHiveJob() *HiveJob

func (*Job) GetJobUuid

func (x *Job) GetJobUuid() string

func (*Job) GetLabels

func (x *Job) GetLabels() map[string]string

func (*Job) GetPigJob

func (x *Job) GetPigJob() *PigJob

func (*Job) GetPlacement

func (x *Job) GetPlacement() *JobPlacement

func (*Job) GetPrestoJob

func (x *Job) GetPrestoJob() *PrestoJob

func (*Job) GetPysparkJob

func (x *Job) GetPysparkJob() *PySparkJob

func (*Job) GetReference

func (x *Job) GetReference() *JobReference

func (*Job) GetScheduling

func (x *Job) GetScheduling() *JobScheduling

func (*Job) GetSparkJob

func (x *Job) GetSparkJob() *SparkJob

func (*Job) GetSparkRJob

func (x *Job) GetSparkRJob() *SparkRJob

func (*Job) GetSparkSqlJob

func (x *Job) GetSparkSqlJob() *SparkSqlJob

func (*Job) GetStatus

func (x *Job) GetStatus() *JobStatus

func (*Job) GetStatusHistory

func (x *Job) GetStatusHistory() []*JobStatus

func (*Job) GetTypeJob

func (m *Job) GetTypeJob() isJob_TypeJob

func (*Job) GetYarnApplications

func (x *Job) GetYarnApplications() []*YarnApplication

func (*Job) ProtoMessage

func (*Job) ProtoMessage()

func (*Job) ProtoReflect

func (x *Job) ProtoReflect() protoreflect.Message

func (*Job) Reset

func (x *Job) Reset()

func (*Job) String

func (x *Job) String() string

JobControllerClient

type JobControllerClient interface {
	// Submits a job to a cluster.
	SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Submits a job to a cluster and returns a long-running operation.
	SubmitJobAsOperation(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Gets the resource representation for a job in a project.
	GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Lists regions/{region}/jobs in a project.
	ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error)
	// Updates a job in a project.
	UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Starts a job cancellation request. To access the job resource
	// after cancellation, call
	// [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
	// or
	// [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
	CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Deletes the job from the project. If the job is active, the delete fails,
	// and the response returns `FAILED_PRECONDITION`.
	DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}

JobControllerClient is the client API for JobController service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewJobControllerClient

func NewJobControllerClient(cc grpc.ClientConnInterface) JobControllerClient
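
A hedged sketch of using the raw gRPC client; conn is assumed to be an already-authenticated *grpc.ClientConn to the Dataproc endpoint and ctx an existing context.Context (many applications use the higher-level client in cloud.google.com/go/dataproc/apiv1 instead):

client := dataprocpb.NewJobControllerClient(conn)

job, err := client.GetJob(ctx, &dataprocpb.GetJobRequest{
	ProjectId: "my-project",
	Region:    "us-central1",
	JobId:     "job-1234", // placeholder job ID
})
if err != nil {
	// handle the error
}
_ = job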

JobControllerServer

type JobControllerServer interface {
	// Submits a job to a cluster.
	SubmitJob(context.Context, *SubmitJobRequest) (*Job, error)
	// Submits a job to a cluster and returns a long-running operation.
	SubmitJobAsOperation(context.Context, *SubmitJobRequest) (*longrunning.Operation, error)
	// Gets the resource representation for a job in a project.
	GetJob(context.Context, *GetJobRequest) (*Job, error)
	// Lists regions/{region}/jobs in a project.
	ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error)
	// Updates a job in a project.
	UpdateJob(context.Context, *UpdateJobRequest) (*Job, error)
	// Starts a job cancellation request. To access the job resource
	// after cancellation, call
	// [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
	// or
	// [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
	CancelJob(context.Context, *CancelJobRequest) (*Job, error)
	// Deletes the job from the project. If the job is active, the delete fails,
	// and the response returns `FAILED_PRECONDITION`.
	DeleteJob(context.Context, *DeleteJobRequest) (*emptypb.Empty, error)
}

JobControllerServer is the server API for JobController service.

JobMetadata

type JobMetadata struct {

	// Output only. The job id.
	JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// Output only. Most recent job status.
	Status *JobStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
	// Output only. Operation type.
	OperationType string `protobuf:"bytes,3,opt,name=operation_type,json=operationType,proto3" json:"operation_type,omitempty"`
	// Output only. Job submission time.
	StartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// contains filtered or unexported fields
}

Job Operation metadata.

func (*JobMetadata) Descriptor

func (*JobMetadata) Descriptor() ([]byte, []int)

Deprecated: Use JobMetadata.ProtoReflect.Descriptor instead.

func (*JobMetadata) GetJobId

func (x *JobMetadata) GetJobId() string

func (*JobMetadata) GetOperationType

func (x *JobMetadata) GetOperationType() string

func (*JobMetadata) GetStartTime

func (x *JobMetadata) GetStartTime() *timestamppb.Timestamp

func (*JobMetadata) GetStatus

func (x *JobMetadata) GetStatus() *JobStatus

func (*JobMetadata) ProtoMessage

func (*JobMetadata) ProtoMessage()

func (*JobMetadata) ProtoReflect

func (x *JobMetadata) ProtoReflect() protoreflect.Message

func (*JobMetadata) Reset

func (x *JobMetadata) Reset()

func (*JobMetadata) String

func (x *JobMetadata) String() string

JobPlacement

type JobPlacement struct {
	ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`

	ClusterUuid string `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`

	ClusterLabels map[string]string "" /* 188 byte string literal not displayed */

}

Dataproc job config.

func (*JobPlacement) Descriptor

func (*JobPlacement) Descriptor() ([]byte, []int)

Deprecated: Use JobPlacement.ProtoReflect.Descriptor instead.

func (*JobPlacement) GetClusterLabels

func (x *JobPlacement) GetClusterLabels() map[string]string

func (*JobPlacement) GetClusterName

func (x *JobPlacement) GetClusterName() string

func (*JobPlacement) GetClusterUuid

func (x *JobPlacement) GetClusterUuid() string

func (*JobPlacement) ProtoMessage

func (*JobPlacement) ProtoMessage()

func (*JobPlacement) ProtoReflect

func (x *JobPlacement) ProtoReflect() protoreflect.Message

func (*JobPlacement) Reset

func (x *JobPlacement) Reset()

func (*JobPlacement) String

func (x *JobPlacement) String() string

JobReference

type JobReference struct {

	// Optional. The ID of the Google Cloud Platform project that the job belongs
	// to. If specified, must match the request project ID.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Optional. The job ID, which must be unique within the project.
	//
	// The ID must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), or hyphens (-). The maximum length is 100 characters.
	//
	// If not specified by the caller, the job ID will be provided by the server.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// contains filtered or unexported fields
}

Encapsulates the full scoping used to reference a job.

func (*JobReference) Descriptor

func (*JobReference) Descriptor() ([]byte, []int)

Deprecated: Use JobReference.ProtoReflect.Descriptor instead.

func (*JobReference) GetJobId

func (x *JobReference) GetJobId() string

func (*JobReference) GetProjectId

func (x *JobReference) GetProjectId() string

func (*JobReference) ProtoMessage

func (*JobReference) ProtoMessage()

func (*JobReference) ProtoReflect

func (x *JobReference) ProtoReflect() protoreflect.Message

func (*JobReference) Reset

func (x *JobReference) Reset()

func (*JobReference) String

func (x *JobReference) String() string

JobScheduling

type JobScheduling struct {

	// Optional. Maximum number of times per hour a driver may be restarted as
	// a result of driver exiting with non-zero code before job is
	// reported failed.
	//
	// A job may be reported as thrashing if the driver exits with a non-zero code
	// four times within a 10-minute window.
	//
	// Maximum value is 10.
	//
	// **Note:** This restartable job option is not supported in Dataproc
	// [workflow templates]
	// (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
	MaxFailuresPerHour int32 `protobuf:"varint,1,opt,name=max_failures_per_hour,json=maxFailuresPerHour,proto3" json:"max_failures_per_hour,omitempty"`
	// Optional. Maximum total number of times a driver may be restarted as a
	// result of the driver exiting with a non-zero code. After the maximum number
	// is reached, the job will be reported as failed.
	//
	// Maximum value is 240.
	//
	// **Note:** Currently, this restartable job option is
	// not supported in Dataproc
	// [workflow
	// templates](https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
	MaxFailuresTotal int32 `protobuf:"varint,2,opt,name=max_failures_total,json=maxFailuresTotal,proto3" json:"max_failures_total,omitempty"`
	// contains filtered or unexported fields
}

Job scheduling options.
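
A tiny sketch with values inside the documented bounds:

sched := &dataprocpb.JobScheduling{
	MaxFailuresPerHour: 5,  // maximum allowed value is 10
	MaxFailuresTotal:   20, // maximum allowed value is 240
}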

func (*JobScheduling) Descriptor

func (*JobScheduling) Descriptor() ([]byte, []int)

Deprecated: Use JobScheduling.ProtoReflect.Descriptor instead.

func (*JobScheduling) GetMaxFailuresPerHour

func (x *JobScheduling) GetMaxFailuresPerHour() int32

func (*JobScheduling) GetMaxFailuresTotal

func (x *JobScheduling) GetMaxFailuresTotal() int32

func (*JobScheduling) ProtoMessage

func (*JobScheduling) ProtoMessage()

func (*JobScheduling) ProtoReflect

func (x *JobScheduling) ProtoReflect() protoreflect.Message

func (*JobScheduling) Reset

func (x *JobScheduling) Reset()

func (*JobScheduling) String

func (x *JobScheduling) String() string

JobStatus

type JobStatus struct {

	// Output only. A state message specifying the overall job state.
	State JobStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.JobStatus_State" json:"state,omitempty"`
	// Optional. Output only. Job state details, such as an error
	// description if the state is ERROR.
	Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"`
	// Output only. The time when this state was entered.
	StateStartTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// Output only. Additional state information, which includes
	// status reported by the agent.
	Substate JobStatus_Substate `protobuf:"varint,7,opt,name=substate,proto3,enum=google.cloud.dataproc.v1.JobStatus_Substate" json:"substate,omitempty"`
	// contains filtered or unexported fields
}

Dataproc job status.

func (*JobStatus) Descriptor

func (*JobStatus) Descriptor() ([]byte, []int)

Deprecated: Use JobStatus.ProtoReflect.Descriptor instead.

func (*JobStatus) GetDetails

func (x *JobStatus) GetDetails() string

func (*JobStatus) GetState

func (x *JobStatus) GetState() JobStatus_State

func (*JobStatus) GetStateStartTime

func (x *JobStatus) GetStateStartTime() *timestamppb.Timestamp

func (*JobStatus) GetSubstate

func (x *JobStatus) GetSubstate() JobStatus_Substate

func (*JobStatus) ProtoMessage

func (*JobStatus) ProtoMessage()

func (*JobStatus) ProtoReflect

func (x *JobStatus) ProtoReflect() protoreflect.Message

func (*JobStatus) Reset

func (x *JobStatus) Reset()

func (*JobStatus) String

func (x *JobStatus) String() string

JobStatus_State

type JobStatus_State int32

The job state.

JobStatus_STATE_UNSPECIFIED, JobStatus_PENDING, JobStatus_SETUP_DONE, JobStatus_RUNNING, JobStatus_CANCEL_PENDING, JobStatus_CANCEL_STARTED, JobStatus_CANCELLED, JobStatus_DONE, JobStatus_ERROR, JobStatus_ATTEMPT_FAILURE

const (
	// The job state is unknown.
	JobStatus_STATE_UNSPECIFIED JobStatus_State = 0
	// The job is pending; it has been submitted, but is not yet running.
	JobStatus_PENDING JobStatus_State = 1
	// Job has been received by the service and completed initial setup;
	// it will soon be submitted to the cluster.
	JobStatus_SETUP_DONE JobStatus_State = 8
	// The job is running on the cluster.
	JobStatus_RUNNING JobStatus_State = 2
	// A CancelJob request has been received, but is pending.
	JobStatus_CANCEL_PENDING JobStatus_State = 3
	// Transient in-flight resources have been canceled, and the request to
	// cancel the running job has been issued to the cluster.
	JobStatus_CANCEL_STARTED JobStatus_State = 7
	// The job cancellation was successful.
	JobStatus_CANCELLED JobStatus_State = 4
	// The job has completed successfully.
	JobStatus_DONE JobStatus_State = 5
	// The job has completed, but encountered an error.
	JobStatus_ERROR JobStatus_State = 6
	// Job attempt has failed. The detail field contains failure details for
	// this attempt.
	//
	// Applies to restartable jobs only.
	JobStatus_ATTEMPT_FAILURE JobStatus_State = 9
)

func (JobStatus_State) Descriptor

func (JobStatus_State) Enum

func (x JobStatus_State) Enum() *JobStatus_State

func (JobStatus_State) EnumDescriptor

func (JobStatus_State) EnumDescriptor() ([]byte, []int)

Deprecated: Use JobStatus_State.Descriptor instead.

func (JobStatus_State) Number

func (JobStatus_State) String

func (x JobStatus_State) String() string

func (JobStatus_State) Type

JobStatus_Substate

type JobStatus_Substate int32

The job substate.

JobStatus_UNSPECIFIED, JobStatus_SUBMITTED, JobStatus_QUEUED, JobStatus_STALE_STATUS

const (
	// The job substate is unknown.
	JobStatus_UNSPECIFIED JobStatus_Substate = 0
	// The Job is submitted to the agent.
	//
	// Applies to RUNNING state.
	JobStatus_SUBMITTED JobStatus_Substate = 1
	// The Job has been received and is awaiting execution (it may be waiting
	// for a condition to be met). See the "details" field for the reason for
	// the delay.
	//
	// Applies to RUNNING state.
	JobStatus_QUEUED JobStatus_Substate = 2
	// The agent-reported status is out of date, which may be caused by a
	// loss of communication between the agent and Dataproc. If the
	// agent does not send a timely update, the job will fail.
	//
	// Applies to RUNNING state.
	JobStatus_STALE_STATUS JobStatus_Substate = 3
)

func (JobStatus_Substate) Descriptor

func (JobStatus_Substate) Enum

func (JobStatus_Substate) EnumDescriptor

func (JobStatus_Substate) EnumDescriptor() ([]byte, []int)

Deprecated: Use JobStatus_Substate.Descriptor instead.

func (JobStatus_Substate) Number

func (JobStatus_Substate) String

func (x JobStatus_Substate) String() string

func (JobStatus_Substate) Type

Job_HadoopJob

type Job_HadoopJob struct {
	// Optional. Job is a Hadoop job.
	HadoopJob *HadoopJob `protobuf:"bytes,3,opt,name=hadoop_job,json=hadoopJob,proto3,oneof"`
}

Job_HiveJob

type Job_HiveJob struct {
	// Optional. Job is a Hive job.
	HiveJob *HiveJob `protobuf:"bytes,6,opt,name=hive_job,json=hiveJob,proto3,oneof"`
}

Job_PigJob

type Job_PigJob struct {
	// Optional. Job is a Pig job.
	PigJob *PigJob `protobuf:"bytes,7,opt,name=pig_job,json=pigJob,proto3,oneof"`
}

Job_PrestoJob

type Job_PrestoJob struct {
	// Optional. Job is a Presto job.
	PrestoJob *PrestoJob `protobuf:"bytes,23,opt,name=presto_job,json=prestoJob,proto3,oneof"`
}

Job_PysparkJob

type Job_PysparkJob struct {
	// Optional. Job is a PySpark job.
	PysparkJob *PySparkJob `protobuf:"bytes,5,opt,name=pyspark_job,json=pysparkJob,proto3,oneof"`
}

Job_SparkJob

type Job_SparkJob struct {
	// Optional. Job is a Spark job.
	SparkJob *SparkJob `protobuf:"bytes,4,opt,name=spark_job,json=sparkJob,proto3,oneof"`
}

Job_SparkRJob

type Job_SparkRJob struct {
	// Optional. Job is a SparkR job.
	SparkRJob *SparkRJob `protobuf:"bytes,21,opt,name=spark_r_job,json=sparkRJob,proto3,oneof"`
}

Job_SparkSqlJob

type Job_SparkSqlJob struct {
	// Optional. Job is a SparkSql job.
	SparkSqlJob *SparkSqlJob `protobuf:"bytes,12,opt,name=spark_sql_job,json=sparkSqlJob,proto3,oneof"`
}

KerberosConfig

type KerberosConfig struct {
	EnableKerberos bool `protobuf:"varint,1,opt,name=enable_kerberos,json=enableKerberos,proto3" json:"enable_kerberos,omitempty"`

	RootPrincipalPasswordUri string "" /* 137 byte string literal not displayed */

	KmsKeyUri string `protobuf:"bytes,3,opt,name=kms_key_uri,json=kmsKeyUri,proto3" json:"kms_key_uri,omitempty"`

	KeystoreUri string `protobuf:"bytes,4,opt,name=keystore_uri,json=keystoreUri,proto3" json:"keystore_uri,omitempty"`

	TruststoreUri string `protobuf:"bytes,5,opt,name=truststore_uri,json=truststoreUri,proto3" json:"truststore_uri,omitempty"`

	KeystorePasswordUri string `protobuf:"bytes,6,opt,name=keystore_password_uri,json=keystorePasswordUri,proto3" json:"keystore_password_uri,omitempty"`

	KeyPasswordUri string `protobuf:"bytes,7,opt,name=key_password_uri,json=keyPasswordUri,proto3" json:"key_password_uri,omitempty"`

	TruststorePasswordUri string `protobuf:"bytes,8,opt,name=truststore_password_uri,json=truststorePasswordUri,proto3" json:"truststore_password_uri,omitempty"`

	CrossRealmTrustRealm string `protobuf:"bytes,9,opt,name=cross_realm_trust_realm,json=crossRealmTrustRealm,proto3" json:"cross_realm_trust_realm,omitempty"`

	CrossRealmTrustKdc string `protobuf:"bytes,10,opt,name=cross_realm_trust_kdc,json=crossRealmTrustKdc,proto3" json:"cross_realm_trust_kdc,omitempty"`

	CrossRealmTrustAdminServer string "" /* 146 byte string literal not displayed */

	CrossRealmTrustSharedPasswordUri string "" /* 166 byte string literal not displayed */

	KdcDbKeyUri string `protobuf:"bytes,13,opt,name=kdc_db_key_uri,json=kdcDbKeyUri,proto3" json:"kdc_db_key_uri,omitempty"`

	TgtLifetimeHours int32 `protobuf:"varint,14,opt,name=tgt_lifetime_hours,json=tgtLifetimeHours,proto3" json:"tgt_lifetime_hours,omitempty"`

	Realm string `protobuf:"bytes,15,opt,name=realm,proto3" json:"realm,omitempty"`

}

Specifies Kerberos related configuration.

func (*KerberosConfig) Descriptor

func (*KerberosConfig) Descriptor() ([]byte, []int)

Deprecated: Use KerberosConfig.ProtoReflect.Descriptor instead.

func (*KerberosConfig) GetCrossRealmTrustAdminServer

func (x *KerberosConfig) GetCrossRealmTrustAdminServer() string

func (*KerberosConfig) GetCrossRealmTrustKdc

func (x *KerberosConfig) GetCrossRealmTrustKdc() string

func (*KerberosConfig) GetCrossRealmTrustRealm

func (x *KerberosConfig) GetCrossRealmTrustRealm() string

func (*KerberosConfig) GetCrossRealmTrustSharedPasswordUri

func (x *KerberosConfig) GetCrossRealmTrustSharedPasswordUri() string

func (*KerberosConfig) GetEnableKerberos

func (x *KerberosConfig) GetEnableKerberos() bool

func (*KerberosConfig) GetKdcDbKeyUri

func (x *KerberosConfig) GetKdcDbKeyUri() string

func (*KerberosConfig) GetKeyPasswordUri

func (x *KerberosConfig) GetKeyPasswordUri() string

func (*KerberosConfig) GetKeystorePasswordUri

func (x *KerberosConfig) GetKeystorePasswordUri() string

func (*KerberosConfig) GetKeystoreUri

func (x *KerberosConfig) GetKeystoreUri() string

func (*KerberosConfig) GetKmsKeyUri

func (x *KerberosConfig) GetKmsKeyUri() string

func (*KerberosConfig) GetRealm

func (x *KerberosConfig) GetRealm() string

func (*KerberosConfig) GetRootPrincipalPasswordUri

func (x *KerberosConfig) GetRootPrincipalPasswordUri() string

func (*KerberosConfig) GetTgtLifetimeHours

func (x *KerberosConfig) GetTgtLifetimeHours() int32

func (*KerberosConfig) GetTruststorePasswordUri

func (x *KerberosConfig) GetTruststorePasswordUri() string

func (*KerberosConfig) GetTruststoreUri

func (x *KerberosConfig) GetTruststoreUri() string

func (*KerberosConfig) ProtoMessage

func (*KerberosConfig) ProtoMessage()

func (*KerberosConfig) ProtoReflect

func (x *KerberosConfig) ProtoReflect() protoreflect.Message

func (*KerberosConfig) Reset

func (x *KerberosConfig) Reset()

func (*KerberosConfig) String

func (x *KerberosConfig) String() string

KubernetesClusterConfig

type KubernetesClusterConfig struct {
	KubernetesNamespace string `protobuf:"bytes,1,opt,name=kubernetes_namespace,json=kubernetesNamespace,proto3" json:"kubernetes_namespace,omitempty"`

	Config isKubernetesClusterConfig_Config `protobuf_oneof:"config"`

	KubernetesSoftwareConfig *KubernetesSoftwareConfig "" /* 135 byte string literal not displayed */

}

The configuration for running the Dataproc cluster on Kubernetes.

func (*KubernetesClusterConfig) Descriptor

func (*KubernetesClusterConfig) Descriptor() ([]byte, []int)

Deprecated: Use KubernetesClusterConfig.ProtoReflect.Descriptor instead.

func (*KubernetesClusterConfig) GetConfig

func (m *KubernetesClusterConfig) GetConfig() isKubernetesClusterConfig_Config

func (*KubernetesClusterConfig) GetGkeClusterConfig

func (x *KubernetesClusterConfig) GetGkeClusterConfig() *GkeClusterConfig

func (*KubernetesClusterConfig) GetKubernetesNamespace

func (x *KubernetesClusterConfig) GetKubernetesNamespace() string

func (*KubernetesClusterConfig) GetKubernetesSoftwareConfig

func (x *KubernetesClusterConfig) GetKubernetesSoftwareConfig() *KubernetesSoftwareConfig

func (*KubernetesClusterConfig) ProtoMessage

func (*KubernetesClusterConfig) ProtoMessage()

func (*KubernetesClusterConfig) ProtoReflect

func (x *KubernetesClusterConfig) ProtoReflect() protoreflect.Message

func (*KubernetesClusterConfig) Reset

func (x *KubernetesClusterConfig) Reset()

func (*KubernetesClusterConfig) String

func (x *KubernetesClusterConfig) String() string

KubernetesClusterConfig_GkeClusterConfig

type KubernetesClusterConfig_GkeClusterConfig struct {
	// Required. The configuration for running the Dataproc cluster on GKE.
	GkeClusterConfig *GkeClusterConfig `protobuf:"bytes,2,opt,name=gke_cluster_config,json=gkeClusterConfig,proto3,oneof"`
}

KubernetesSoftwareConfig

type KubernetesSoftwareConfig struct {
	ComponentVersion map[string]string "" /* 197 byte string literal not displayed */

	Properties map[string]string "" /* 161 byte string literal not displayed */

}

The software configuration for this Dataproc cluster running on Kubernetes.

func (*KubernetesSoftwareConfig) Descriptor

func (*KubernetesSoftwareConfig) Descriptor() ([]byte, []int)

Deprecated: Use KubernetesSoftwareConfig.ProtoReflect.Descriptor instead.

func (*KubernetesSoftwareConfig) GetComponentVersion

func (x *KubernetesSoftwareConfig) GetComponentVersion() map[string]string

func (*KubernetesSoftwareConfig) GetProperties

func (x *KubernetesSoftwareConfig) GetProperties() map[string]string

func (*KubernetesSoftwareConfig) ProtoMessage

func (*KubernetesSoftwareConfig) ProtoMessage()

func (*KubernetesSoftwareConfig) ProtoReflect

func (x *KubernetesSoftwareConfig) ProtoReflect() protoreflect.Message

func (*KubernetesSoftwareConfig) Reset

func (x *KubernetesSoftwareConfig) Reset()

func (*KubernetesSoftwareConfig) String

func (x *KubernetesSoftwareConfig) String() string

LifecycleConfig

type LifecycleConfig struct {

	// Optional. The duration to keep the cluster alive while idling (when no jobs
	// are running). Passing this threshold will cause the cluster to be
	// deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
	// representation of
	// [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
	IdleDeleteTtl *durationpb.Duration `protobuf:"bytes,1,opt,name=idle_delete_ttl,json=idleDeleteTtl,proto3" json:"idle_delete_ttl,omitempty"`
	// Either the exact time the cluster should be deleted at or
	// the cluster maximum age.
	//
	// Types that are assignable to Ttl:
	//
	//	*LifecycleConfig_AutoDeleteTime
	//	*LifecycleConfig_AutoDeleteTtl
	Ttl isLifecycleConfig_Ttl `protobuf_oneof:"ttl"`
	// Output only. The time when cluster became idle (most recent job finished)
	// and became eligible for deletion due to idleness (see JSON representation
	// of
	// [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
	IdleStartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=idle_start_time,json=idleStartTime,proto3" json:"idle_start_time,omitempty"`
	// contains filtered or unexported fields
}

Specifies the cluster auto-delete schedule configuration.
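
A sketch using the well-known Duration type (durationpb is google.golang.org/protobuf/types/known/durationpb); the idle TTL and lifetime below are illustrative and stay within the documented minimums and the 14-day maximum:

lifecycle := &dataprocpb.LifecycleConfig{
	IdleDeleteTtl: durationpb.New(30 * time.Minute), // delete after 30 idle minutes
	// Ttl is a oneof: either an absolute deletion time or a maximum age.
	Ttl: &dataprocpb.LifecycleConfig_AutoDeleteTtl{
		AutoDeleteTtl: durationpb.New(8 * time.Hour),
	},
}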

func (*LifecycleConfig) Descriptor

func (*LifecycleConfig) Descriptor() ([]byte, []int)

Deprecated: Use LifecycleConfig.ProtoReflect.Descriptor instead.

func (*LifecycleConfig) GetAutoDeleteTime

func (x *LifecycleConfig) GetAutoDeleteTime() *timestamppb.Timestamp

func (*LifecycleConfig) GetAutoDeleteTtl

func (x *LifecycleConfig) GetAutoDeleteTtl() *durationpb.Duration

func (*LifecycleConfig) GetIdleDeleteTtl

func (x *LifecycleConfig) GetIdleDeleteTtl() *durationpb.Duration

func (*LifecycleConfig) GetIdleStartTime

func (x *LifecycleConfig) GetIdleStartTime() *timestamppb.Timestamp

func (*LifecycleConfig) GetTtl

func (m *LifecycleConfig) GetTtl() isLifecycleConfig_Ttl

func (*LifecycleConfig) ProtoMessage

func (*LifecycleConfig) ProtoMessage()

func (*LifecycleConfig) ProtoReflect

func (x *LifecycleConfig) ProtoReflect() protoreflect.Message

func (*LifecycleConfig) Reset

func (x *LifecycleConfig) Reset()

func (*LifecycleConfig) String

func (x *LifecycleConfig) String() string

LifecycleConfig_AutoDeleteTime

type LifecycleConfig_AutoDeleteTime struct {
	// Optional. The time when the cluster will be auto-deleted (see JSON
	// representation of
	// [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
	AutoDeleteTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=auto_delete_time,json=autoDeleteTime,proto3,oneof"`
}

LifecycleConfig_AutoDeleteTtl

type LifecycleConfig_AutoDeleteTtl struct {
	// Optional. The lifetime duration of the cluster. The cluster will be
	// auto-deleted at the end of this period. Minimum value is 10 minutes;
	// maximum value is 14 days (see JSON representation of
	// [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
	AutoDeleteTtl *durationpb.Duration `protobuf:"bytes,3,opt,name=auto_delete_ttl,json=autoDeleteTtl,proto3,oneof"`
}

ListAutoscalingPoliciesRequest

type ListAutoscalingPoliciesRequest struct {

	// Required. The "resource name" of the region or location, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	//   - For `projects.regions.autoscalingPolicies.list`, the resource name
	//     of the region has the following format:
	//     `projects/{project_id}/regions/{region}`
	//
	//   - For `projects.locations.autoscalingPolicies.list`, the resource name
	//     of the location has the following format:
	//     `projects/{project_id}/locations/{location}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Optional. The maximum number of results to return in each response.
	// Must be less than or equal to 1000. Defaults to 100.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. The page token, returned by a previous call, to request the
	// next page of results.
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// contains filtered or unexported fields
}

A request to list autoscaling policies in a project.

func (*ListAutoscalingPoliciesRequest) Descriptor

func (*ListAutoscalingPoliciesRequest) Descriptor() ([]byte, []int)

Deprecated: Use ListAutoscalingPoliciesRequest.ProtoReflect.Descriptor instead.

func (*ListAutoscalingPoliciesRequest) GetPageSize

func (x *ListAutoscalingPoliciesRequest) GetPageSize() int32

func (*ListAutoscalingPoliciesRequest) GetPageToken

func (x *ListAutoscalingPoliciesRequest) GetPageToken() string

func (*ListAutoscalingPoliciesRequest) GetParent

func (x *ListAutoscalingPoliciesRequest) GetParent() string

func (*ListAutoscalingPoliciesRequest) ProtoMessage

func (*ListAutoscalingPoliciesRequest) ProtoMessage()

func (*ListAutoscalingPoliciesRequest) ProtoReflect

func (x *ListAutoscalingPoliciesRequest) ProtoReflect() protoreflect.Message

func (*ListAutoscalingPoliciesRequest) Reset

func (x *ListAutoscalingPoliciesRequest) Reset()

func (*ListAutoscalingPoliciesRequest) String

func (x *ListAutoscalingPoliciesRequest) String() string

ListAutoscalingPoliciesResponse

type ListAutoscalingPoliciesResponse struct {

	// Output only. Autoscaling policies list.
	Policies []*AutoscalingPolicy `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"`
	// Output only. This token is included in the response if there are more
	// results to fetch.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	// contains filtered or unexported fields
}

A response to a request to list autoscaling policies in a project.

func (*ListAutoscalingPoliciesResponse) Descriptor

func (*ListAutoscalingPoliciesResponse) Descriptor() ([]byte, []int)

Deprecated: Use ListAutoscalingPoliciesResponse.ProtoReflect.Descriptor instead.

func (*ListAutoscalingPoliciesResponse) GetNextPageToken

func (x *ListAutoscalingPoliciesResponse) GetNextPageToken() string

func (*ListAutoscalingPoliciesResponse) GetPolicies

func (x *ListAutoscalingPoliciesResponse) GetPolicies() []*AutoscalingPolicy

func (*ListAutoscalingPoliciesResponse) ProtoMessage

func (*ListAutoscalingPoliciesResponse) ProtoMessage()

func (*ListAutoscalingPoliciesResponse) ProtoReflect

func (x *ListAutoscalingPoliciesResponse) ProtoReflect() protoreflect.Message

func (*ListAutoscalingPoliciesResponse) Reset

func (x *ListAutoscalingPoliciesResponse) Reset()

func (*ListAutoscalingPoliciesResponse) String

func (x *ListAutoscalingPoliciesResponse) String() string

ListBatchesRequest

type ListBatchesRequest struct {

	// Required. The parent, which owns this collection of batches.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Optional. The maximum number of batches to return in each response.
	// The service may return fewer than this value.
	// The default page size is 20; the maximum page size is 1000.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. A page token received from a previous `ListBatches` call.
	// Provide this token to retrieve the subsequent page.
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// contains filtered or unexported fields
}

A request to list batch workloads in a project.

func (*ListBatchesRequest) Descriptor

func (*ListBatchesRequest) Descriptor() ([]byte, []int)

Deprecated: Use ListBatchesRequest.ProtoReflect.Descriptor instead.

func (*ListBatchesRequest) GetPageSize

func (x *ListBatchesRequest) GetPageSize() int32

func (*ListBatchesRequest) GetPageToken

func (x *ListBatchesRequest) GetPageToken() string

func (*ListBatchesRequest) GetParent

func (x *ListBatchesRequest) GetParent() string

func (*ListBatchesRequest) ProtoMessage

func (*ListBatchesRequest) ProtoMessage()

func (*ListBatchesRequest) ProtoReflect

func (x *ListBatchesRequest) ProtoReflect() protoreflect.Message

func (*ListBatchesRequest) Reset

func (x *ListBatchesRequest) Reset()

func (*ListBatchesRequest) String

func (x *ListBatchesRequest) String() string

ListBatchesResponse

type ListBatchesResponse struct {

	// The batches from the specified collection.
	Batches []*Batch `protobuf:"bytes,1,rep,name=batches,proto3" json:"batches,omitempty"`
	// A token, which can be sent as `page_token` to retrieve the next page.
	// If this field is omitted, there are no subsequent pages.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	// contains filtered or unexported fields
}

A list of batch workloads.
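
The next_page_token follows the standard List paging pattern. Below is a sketch of driving it by hand; listBatches is a hypothetical stand-in for whatever issues the RPC (the generated clients in cloud.google.com/go/dataproc/apiv1 can page for you):

package main

import (
	"context"
	"log"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

// listBatches is a hypothetical placeholder for the code that performs the
// ListBatches RPC with a real client.
func listBatches(ctx context.Context, req *dataprocpb.ListBatchesRequest) (*dataprocpb.ListBatchesResponse, error) {
	return &dataprocpb.ListBatchesResponse{}, nil
}

func main() {
	ctx := context.Background()
	req := &dataprocpb.ListBatchesRequest{
		Parent:   "projects/my-project/locations/us-central1", // placeholder parent
		PageSize: 100,
	}
	var all []*dataprocpb.Batch
	for {
		resp, err := listBatches(ctx, req)
		if err != nil {
			log.Fatal(err)
		}
		all = append(all, resp.GetBatches()...)
		if resp.GetNextPageToken() == "" {
			break // no more pages
		}
		req.PageToken = resp.GetNextPageToken()
	}
	_ = all
}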

func (*ListBatchesResponse) Descriptor

func (*ListBatchesResponse) Descriptor() ([]byte, []int)

Deprecated: Use ListBatchesResponse.ProtoReflect.Descriptor instead.

func (*ListBatchesResponse) GetBatches

func (x *ListBatchesResponse) GetBatches() []*Batch

func (*ListBatchesResponse) GetNextPageToken

func (x *ListBatchesResponse) GetNextPageToken() string

func (*ListBatchesResponse) ProtoMessage

func (*ListBatchesResponse) ProtoMessage()

func (*ListBatchesResponse) ProtoReflect

func (x *ListBatchesResponse) ProtoReflect() protoreflect.Message

func (*ListBatchesResponse) Reset

func (x *ListBatchesResponse) Reset()

func (*ListBatchesResponse) String

func (x *ListBatchesResponse) String() string

ListClustersRequest

type ListClustersRequest struct {

	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,4,opt,name=region,proto3" json:"region,omitempty"`
	// Optional. A filter constraining the clusters to list. Filters are
	// case-sensitive and have the following syntax:
	//
	// field = value [AND [field = value]] ...
	//
	// where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
	// and `[KEY]` is a label key. **value** can be `*` to match all values.
	// `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
	// `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
	// contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
	// contains the `DELETING` and `ERROR` states.
	// `clusterName` is the name of the cluster provided at creation time.
	// Only the logical `AND` operator is supported; space-separated items are
	// treated as having an implicit `AND` operator.
	//
	// Example filter:
	//
	// status.state = ACTIVE AND clusterName = mycluster
	// AND labels.env = staging AND labels.starred = *
	Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
	// Optional. The standard List page size.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. The standard List page token.
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// contains filtered or unexported fields
}

A request to list the clusters in a project.
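
A sketch of a request using the filter syntax documented above; the project, region, and label values are placeholders:

package main

import (
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	req := &dataprocpb.ListClustersRequest{
		ProjectId: "my-project",  // placeholder
		Region:    "us-central1", // placeholder
		// Only ACTIVE clusters named "mycluster" that carry a staging env label.
		Filter:   "status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging",
		PageSize: 50,
	}
	fmt.Println(req.GetFilter())
}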

func (*ListClustersRequest) Descriptor

func (*ListClustersRequest) Descriptor() ([]byte, []int)

Deprecated: Use ListClustersRequest.ProtoReflect.Descriptor instead.

func (*ListClustersRequest) GetFilter

func (x *ListClustersRequest) GetFilter() string

func (*ListClustersRequest) GetPageSize

func (x *ListClustersRequest) GetPageSize() int32

func (*ListClustersRequest) GetPageToken

func (x *ListClustersRequest) GetPageToken() string

func (*ListClustersRequest) GetProjectId

func (x *ListClustersRequest) GetProjectId() string

func (*ListClustersRequest) GetRegion

func (x *ListClustersRequest) GetRegion() string

func (*ListClustersRequest) ProtoMessage

func (*ListClustersRequest) ProtoMessage()

func (*ListClustersRequest) ProtoReflect

func (x *ListClustersRequest) ProtoReflect() protoreflect.Message

func (*ListClustersRequest) Reset

func (x *ListClustersRequest) Reset()

func (*ListClustersRequest) String

func (x *ListClustersRequest) String() string

ListClustersResponse

type ListClustersResponse struct {

	// Output only. The clusters in the project.
	Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"`
	// Output only. This token is included in the response if there are more
	// results to fetch. To fetch additional results, provide this value as the
	// `page_token` in a subsequent `ListClustersRequest`.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	// contains filtered or unexported fields
}

The list of all clusters in a project.

func (*ListClustersResponse) Descriptor

func (*ListClustersResponse) Descriptor() ([]byte, []int)

Deprecated: Use ListClustersResponse.ProtoReflect.Descriptor instead.

func (*ListClustersResponse) GetClusters

func (x *ListClustersResponse) GetClusters() []*Cluster

func (*ListClustersResponse) GetNextPageToken

func (x *ListClustersResponse) GetNextPageToken() string

func (*ListClustersResponse) ProtoMessage

func (*ListClustersResponse) ProtoMessage()

func (*ListClustersResponse) ProtoReflect

func (x *ListClustersResponse) ProtoReflect() protoreflect.Message

func (*ListClustersResponse) Reset

func (x *ListClustersResponse) Reset()

func (*ListClustersResponse) String

func (x *ListClustersResponse) String() string

ListJobsRequest

type ListJobsRequest struct {
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`

	Region string `protobuf:"bytes,6,opt,name=region,proto3" json:"region,omitempty"`

	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`

	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`

	ClusterName string `protobuf:"bytes,4,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`

	JobStateMatcher ListJobsRequest_JobStateMatcher "" /* 171 byte string literal not displayed */

	Filter string `protobuf:"bytes,7,opt,name=filter,proto3" json:"filter,omitempty"`

}

A request to list jobs in a project.

func (*ListJobsRequest) Descriptor

func (*ListJobsRequest) Descriptor() ([]byte, []int)

Deprecated: Use ListJobsRequest.ProtoReflect.Descriptor instead.

func (*ListJobsRequest) GetClusterName

func (x *ListJobsRequest) GetClusterName() string

func (*ListJobsRequest) GetFilter

func (x *ListJobsRequest) GetFilter() string

func (*ListJobsRequest) GetJobStateMatcher

func (x *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher

func (*ListJobsRequest) GetPageSize

func (x *ListJobsRequest) GetPageSize() int32

func (*ListJobsRequest) GetPageToken

func (x *ListJobsRequest) GetPageToken() string

func (*ListJobsRequest) GetProjectId

func (x *ListJobsRequest) GetProjectId() string

func (*ListJobsRequest) GetRegion

func (x *ListJobsRequest) GetRegion() string

func (*ListJobsRequest) ProtoMessage

func (*ListJobsRequest) ProtoMessage()

func (*ListJobsRequest) ProtoReflect

func (x *ListJobsRequest) ProtoReflect() protoreflect.Message

func (*ListJobsRequest) Reset

func (x *ListJobsRequest) Reset()

func (*ListJobsRequest) String

func (x *ListJobsRequest) String() string

ListJobsRequest_JobStateMatcher

type ListJobsRequest_JobStateMatcher int32

A matcher that specifies categories of job states.

ListJobsRequest_ALL, ListJobsRequest_ACTIVE, ListJobsRequest_NON_ACTIVE

const (
	// Match all jobs, regardless of state.
	ListJobsRequest_ALL ListJobsRequest_JobStateMatcher = 0
	// Only match jobs in non-terminal states: PENDING, RUNNING, or
	// CANCEL_PENDING.
	ListJobsRequest_ACTIVE ListJobsRequest_JobStateMatcher = 1
	// Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
	ListJobsRequest_NON_ACTIVE ListJobsRequest_JobStateMatcher = 2
)
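
For example, the matcher can be combined with a cluster name to list only jobs in non-terminal states; a sketch with placeholder identifiers:

package main

import (
	"fmt"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	// Restrict the listing to PENDING, RUNNING, or CANCEL_PENDING jobs on one cluster.
	req := &dataprocpb.ListJobsRequest{
		ProjectId:       "my-project",  // placeholder
		Region:          "us-central1", // placeholder
		ClusterName:     "my-cluster",  // placeholder
		JobStateMatcher: dataprocpb.ListJobsRequest_ACTIVE,
	}
	fmt.Println(req.GetJobStateMatcher()) // prints ACTIVE
}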

func (ListJobsRequest_JobStateMatcher) Descriptor

func (ListJobsRequest_JobStateMatcher) Descriptor() protoreflect.EnumDescriptor

func (ListJobsRequest_JobStateMatcher) Enum

func (x ListJobsRequest_JobStateMatcher) Enum() *ListJobsRequest_JobStateMatcher

func (ListJobsRequest_JobStateMatcher) EnumDescriptor

func (ListJobsRequest_JobStateMatcher) EnumDescriptor() ([]byte, []int)

Deprecated: Use ListJobsRequest_JobStateMatcher.Descriptor instead.

func (ListJobsRequest_JobStateMatcher) Number

func (x ListJobsRequest_JobStateMatcher) Number() protoreflect.EnumNumber

func (ListJobsRequest_JobStateMatcher) String

func (x ListJobsRequest_JobStateMatcher) String() string

func (ListJobsRequest_JobStateMatcher) Type

func (ListJobsRequest_JobStateMatcher) Type() protoreflect.EnumType

ListJobsResponse

type ListJobsResponse struct {

	// Output only. Jobs list.
	Jobs []*Job `protobuf:"bytes,1,rep,name=jobs,proto3" json:"jobs,omitempty"`
	// Optional. This token is included in the response if there are more results
	// to fetch. To fetch additional results, provide this value as the
	// `page_token` in a subsequent ListJobsRequest.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	// contains filtered or unexported fields
}

A list of jobs in a project.

func (*ListJobsResponse) Descriptor

func (*ListJobsResponse) Descriptor() ([]byte, []int)

Deprecated: Use ListJobsResponse.ProtoReflect.Descriptor instead.

func (*ListJobsResponse) GetJobs

func (x *ListJobsResponse) GetJobs() []*Job

func (*ListJobsResponse) GetNextPageToken

func (x *ListJobsResponse) GetNextPageToken() string

func (*ListJobsResponse) ProtoMessage

func (*ListJobsResponse) ProtoMessage()

func (*ListJobsResponse) ProtoReflect

func (x *ListJobsResponse) ProtoReflect() protoreflect.Message

func (*ListJobsResponse) Reset

func (x *ListJobsResponse) Reset()

func (*ListJobsResponse) String

func (x *ListJobsResponse) String() string

ListWorkflowTemplatesRequest

type ListWorkflowTemplatesRequest struct {

	// Required. The resource name of the region or location, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	//   - For `projects.regions.workflowTemplates.list`, the resource
	//     name of the region has the following format:
	//     `projects/{project_id}/regions/{region}`
	//
	//   - For `projects.locations.workflowTemplates.list`, the
	//     resource name of the location has the following format:
	//     `projects/{project_id}/locations/{location}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Optional. The maximum number of results to return in each response.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. The page token, returned by a previous call, to request the
	// next page of results.
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// contains filtered or unexported fields
}

A request to list workflow templates in a project.

func (*ListWorkflowTemplatesRequest) Descriptor

func (*ListWorkflowTemplatesRequest) Descriptor() ([]byte, []int)

Deprecated: Use ListWorkflowTemplatesRequest.ProtoReflect.Descriptor instead.

func (*ListWorkflowTemplatesRequest) GetPageSize

func (x *ListWorkflowTemplatesRequest) GetPageSize() int32

func (*ListWorkflowTemplatesRequest) GetPageToken

func (x *ListWorkflowTemplatesRequest) GetPageToken() string

func (*ListWorkflowTemplatesRequest) GetParent

func (x *ListWorkflowTemplatesRequest) GetParent() string

func (*ListWorkflowTemplatesRequest) ProtoMessage

func (*ListWorkflowTemplatesRequest) ProtoMessage()

func (*ListWorkflowTemplatesRequest) ProtoReflect

func (x *ListWorkflowTemplatesRequest) ProtoReflect() protoreflect.Message

func (*ListWorkflowTemplatesRequest) Reset

func (x *ListWorkflowTemplatesRequest) Reset()

func (*ListWorkflowTemplatesRequest) String

func (x *ListWorkflowTemplatesRequest) String() string

ListWorkflowTemplatesResponse

type ListWorkflowTemplatesResponse struct {

	// Output only. WorkflowTemplates list.
	Templates []*WorkflowTemplate `protobuf:"bytes,1,rep,name=templates,proto3" json:"templates,omitempty"`
	// Output only. This token is included in the response if there are more
	// results to fetch. To fetch additional results, provide this value as the
	// page_token in a subsequent ListWorkflowTemplatesRequest.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	// contains filtered or unexported fields
}

A response to a request to list workflow templates in a project.

func (*ListWorkflowTemplatesResponse) Descriptor

func (*ListWorkflowTemplatesResponse) Descriptor() ([]byte, []int)

Deprecated: Use ListWorkflowTemplatesResponse.ProtoReflect.Descriptor instead.

func (*ListWorkflowTemplatesResponse) GetNextPageToken

func (x *ListWorkflowTemplatesResponse) GetNextPageToken() string

func (*ListWorkflowTemplatesResponse) GetTemplates

func (x *ListWorkflowTemplatesResponse) GetTemplates() []*WorkflowTemplate

func (*ListWorkflowTemplatesResponse) ProtoMessage

func (*ListWorkflowTemplatesResponse) ProtoMessage()

func (*ListWorkflowTemplatesResponse) ProtoReflect

func (x *ListWorkflowTemplatesResponse) ProtoReflect() protoreflect.Message

func (*ListWorkflowTemplatesResponse) Reset

func (x *ListWorkflowTemplatesResponse) Reset()

func (*ListWorkflowTemplatesResponse) String

func (x *ListWorkflowTemplatesResponse) String() string

LoggingConfig

type LoggingConfig struct {
	DriverLogLevels map[string]LoggingConfig_Level "" /* 247 byte string literal not displayed */

}

The runtime logging config of the job.
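
A sketch of per-logger driver log levels; the logger names are examples only:

package main

import (
	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	// Keep the root logger at INFO but raise Spark's driver logging to DEBUG.
	cfg := &dataprocpb.LoggingConfig{
		DriverLogLevels: map[string]dataprocpb.LoggingConfig_Level{
			"root":             dataprocpb.LoggingConfig_INFO,
			"org.apache.spark": dataprocpb.LoggingConfig_DEBUG,
		},
	}
	_ = cfg
}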

func (*LoggingConfig) Descriptor

func (*LoggingConfig) Descriptor() ([]byte, []int)

Deprecated: Use LoggingConfig.ProtoReflect.Descriptor instead.

func (*LoggingConfig) GetDriverLogLevels

func (x *LoggingConfig) GetDriverLogLevels() map[string]LoggingConfig_Level

func (*LoggingConfig) ProtoMessage

func (*LoggingConfig) ProtoMessage()

func (*LoggingConfig) ProtoReflect

func (x *LoggingConfig) ProtoReflect() protoreflect.Message

func (*LoggingConfig) Reset

func (x *LoggingConfig) Reset()

func (*LoggingConfig) String

func (x *LoggingConfig) String() string

LoggingConfig_Level

type LoggingConfig_Level int32

The Log4j level for job execution. When running an Apache Hive job, Cloud Dataproc configures the Hive client to use an equivalent verbosity level.

LoggingConfig_LEVEL_UNSPECIFIED, LoggingConfig_ALL, LoggingConfig_TRACE, LoggingConfig_DEBUG, LoggingConfig_INFO, LoggingConfig_WARN, LoggingConfig_ERROR, LoggingConfig_FATAL, LoggingConfig_OFF

const (
	// Level is unspecified. Use default level for log4j.
	LoggingConfig_LEVEL_UNSPECIFIED LoggingConfig_Level = 0
	// Use ALL level for log4j.
	LoggingConfig_ALL LoggingConfig_Level = 1
	// Use TRACE level for log4j.
	LoggingConfig_TRACE LoggingConfig_Level = 2
	// Use DEBUG level for log4j.
	LoggingConfig_DEBUG LoggingConfig_Level = 3
	// Use INFO level for log4j.
	LoggingConfig_INFO LoggingConfig_Level = 4
	// Use WARN level for log4j.
	LoggingConfig_WARN LoggingConfig_Level = 5
	// Use ERROR level for log4j.
	LoggingConfig_ERROR LoggingConfig_Level = 6
	// Use FATAL level for log4j.
	LoggingConfig_FATAL LoggingConfig_Level = 7
	// Turn off log4j.
	LoggingConfig_OFF LoggingConfig_Level = 8
)

func (LoggingConfig_Level) Descriptor

func (LoggingConfig_Level) Descriptor() protoreflect.EnumDescriptor

func (LoggingConfig_Level) Enum

func (x LoggingConfig_Level) Enum() *LoggingConfig_Level

func (LoggingConfig_Level) EnumDescriptor

func (LoggingConfig_Level) EnumDescriptor() ([]byte, []int)

Deprecated: Use LoggingConfig_Level.Descriptor instead.

func (LoggingConfig_Level) Number

func (x LoggingConfig_Level) Number() protoreflect.EnumNumber

func (LoggingConfig_Level) String

func (x LoggingConfig_Level) String() string

func (LoggingConfig_Level) Type

func (LoggingConfig_Level) Type() protoreflect.EnumType

ManagedCluster

type ManagedCluster struct {
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`

	Config *ClusterConfig `protobuf:"bytes,3,opt,name=config,proto3" json:"config,omitempty"`

	Labels map[string]string "" /* 153 byte string literal not displayed */

}

Cluster that is managed by the workflow.

func (*ManagedCluster) Descriptor

func (*ManagedCluster) Descriptor() ([]byte, []int)

Deprecated: Use ManagedCluster.ProtoReflect.Descriptor instead.

func (*ManagedCluster) GetClusterName

func (x *ManagedCluster) GetClusterName() string

func (*ManagedCluster) GetConfig

func (x *ManagedCluster) GetConfig() *ClusterConfig

func (*ManagedCluster) GetLabels

func (x *ManagedCluster) GetLabels() map[string]string

func (*ManagedCluster) ProtoMessage

func (*ManagedCluster) ProtoMessage()

func (*ManagedCluster) ProtoReflect

func (x *ManagedCluster) ProtoReflect() protoreflect.Message

func (*ManagedCluster) Reset

func (x *ManagedCluster) Reset()

func (*ManagedCluster) String

func (x *ManagedCluster) String() string

ManagedGroupConfig

type ManagedGroupConfig struct {
	InstanceTemplateName string `protobuf:"bytes,1,opt,name=instance_template_name,json=instanceTemplateName,proto3" json:"instance_template_name,omitempty"`

	InstanceGroupManagerName string "" /* 137 byte string literal not displayed */

}

Specifies the resources used to actively manage an instance group.

func (*ManagedGroupConfig) Descriptor

func (*ManagedGroupConfig) Descriptor() ([]byte, []int)

Deprecated: Use ManagedGroupConfig.ProtoReflect.Descriptor instead.

func (*ManagedGroupConfig) GetInstanceGroupManagerName

func (x *ManagedGroupConfig) GetInstanceGroupManagerName() string

func (*ManagedGroupConfig) GetInstanceTemplateName

func (x *ManagedGroupConfig) GetInstanceTemplateName() string

func (*ManagedGroupConfig) ProtoMessage

func (*ManagedGroupConfig) ProtoMessage()

func (*ManagedGroupConfig) ProtoReflect

func (x *ManagedGroupConfig) ProtoReflect() protoreflect.Message

func (*ManagedGroupConfig) Reset

func (x *ManagedGroupConfig) Reset()

func (*ManagedGroupConfig) String

func (x *ManagedGroupConfig) String() string

MetastoreConfig

type MetastoreConfig struct {
	DataprocMetastoreService string "" /* 135 byte string literal not displayed */

}

Specifies a Metastore configuration.

func (*MetastoreConfig) Descriptor

func (*MetastoreConfig) Descriptor() ([]byte, []int)

Deprecated: Use MetastoreConfig.ProtoReflect.Descriptor instead.

func (*MetastoreConfig) GetDataprocMetastoreService

func (x *MetastoreConfig) GetDataprocMetastoreService() string

func (*MetastoreConfig) ProtoMessage

func (*MetastoreConfig) ProtoMessage()

func (*MetastoreConfig) ProtoReflect

func (x *MetastoreConfig) ProtoReflect() protoreflect.Message

func (*MetastoreConfig) Reset

func (x *MetastoreConfig) Reset()

func (*MetastoreConfig) String

func (x *MetastoreConfig) String() string

NodeGroup

type NodeGroup struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`

	Roles []NodeGroup_Role `protobuf:"varint,2,rep,packed,name=roles,proto3,enum=google.cloud.dataproc.v1.NodeGroup_Role" json:"roles,omitempty"`

	NodeGroupConfig *InstanceGroupConfig `protobuf:"bytes,3,opt,name=node_group_config,json=nodeGroupConfig,proto3" json:"node_group_config,omitempty"`

	Labels map[string]string "" /* 153 byte string literal not displayed */

}

Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc [NodeGroupAffinity][google.cloud.dataproc.v1.NodeGroupAffinity] resource.

func (*NodeGroup) Descriptor

func (*NodeGroup) Descriptor() ([]byte, []int)

Deprecated: Use NodeGroup.ProtoReflect.Descriptor instead.

func (*NodeGroup) GetLabels

func (x *NodeGroup) GetLabels() map[string]string

func (*NodeGroup) GetName

func (x *NodeGroup) GetName() string

func (*NodeGroup) GetNodeGroupConfig

func (x *NodeGroup) GetNodeGroupConfig() *InstanceGroupConfig

func (*NodeGroup) GetRoles

func (x *NodeGroup) GetRoles() []NodeGroup_Role

func (*NodeGroup) ProtoMessage

func (*NodeGroup) ProtoMessage()

func (*NodeGroup) ProtoReflect

func (x *NodeGroup) ProtoReflect() protoreflect.Message

func (*NodeGroup) Reset

func (x *NodeGroup) Reset()

func (*NodeGroup) String

func (x *NodeGroup) String() string

NodeGroupAffinity

type NodeGroupAffinity struct {

	// Required. The URI of a
	// sole-tenant [node group
	// resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
	// that the cluster will be created on.
	//
	// A full URL, partial URI, or node group name is valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
	// * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`
	// * `node-group-1`
	NodeGroupUri string `protobuf:"bytes,1,opt,name=node_group_uri,json=nodeGroupUri,proto3" json:"node_group_uri,omitempty"`
	// contains filtered or unexported fields
}

Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc [NodeGroup][google.cloud.dataproc.v1.NodeGroup] resource.

func (*NodeGroupAffinity) Descriptor

func (*NodeGroupAffinity) Descriptor() ([]byte, []int)

Deprecated: Use NodeGroupAffinity.ProtoReflect.Descriptor instead.

func (*NodeGroupAffinity) GetNodeGroupUri

func (x *NodeGroupAffinity) GetNodeGroupUri() string

func (*NodeGroupAffinity) ProtoMessage

func (*NodeGroupAffinity) ProtoMessage()

func (*NodeGroupAffinity) ProtoReflect

func (x *NodeGroupAffinity) ProtoReflect() protoreflect.Message

func (*NodeGroupAffinity) Reset

func (x *NodeGroupAffinity) Reset()

func (*NodeGroupAffinity) String

func (x *NodeGroupAffinity) String() string

NodeGroupControllerClient

type NodeGroupControllerClient interface {
	// Creates a node group in a cluster. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] is
	// [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).
	CreateNodeGroup(ctx context.Context, in *CreateNodeGroupRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Resizes a node group in a cluster. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] is
	// [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).
	ResizeNodeGroup(ctx context.Context, in *ResizeNodeGroupRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Gets the resource representation for a node group in a
	// cluster.
	GetNodeGroup(ctx context.Context, in *GetNodeGroupRequest, opts ...grpc.CallOption) (*NodeGroup, error)
}

NodeGroupControllerClient is the client API for NodeGroupController service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewNodeGroupControllerClient

func NewNodeGroupControllerClient(cc grpc.ClientConnInterface) NodeGroupControllerClient
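
A minimal sketch of using the raw gRPC client. The endpoint and resource name are placeholders and authentication is omitted; production code normally uses the higher-level client in cloud.google.com/go/dataproc/apiv1, which handles credentials and retries:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Dial the service endpoint over TLS (OAuth credentials omitted here).
	conn, err := grpc.Dial("dataproc.googleapis.com:443",
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := dataprocpb.NewNodeGroupControllerClient(conn)
	ng, err := client.GetNodeGroup(context.Background(), &dataprocpb.GetNodeGroupRequest{
		// Placeholder node group resource name.
		Name: "projects/my-project/regions/us-central1/clusters/my-cluster/nodeGroups/my-node-group",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(ng.GetName())
}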

NodeGroupControllerServer

type NodeGroupControllerServer interface {
	// Creates a node group in a cluster. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] is
	// [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).
	CreateNodeGroup(context.Context, *CreateNodeGroupRequest) (*longrunning.Operation, error)
	// Resizes a node group in a cluster. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] is
	// [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).
	ResizeNodeGroup(context.Context, *ResizeNodeGroupRequest) (*longrunning.Operation, error)
	// Gets the resource representation for a node group in a
	// cluster.
	GetNodeGroup(context.Context, *GetNodeGroupRequest) (*NodeGroup, error)
}

NodeGroupControllerServer is the server API for NodeGroupController service.

NodeGroupOperationMetadata

type NodeGroupOperationMetadata struct {
	NodeGroupId string `protobuf:"bytes,1,opt,name=node_group_id,json=nodeGroupId,proto3" json:"node_group_id,omitempty"`

	ClusterUuid string `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`

	Status *ClusterOperationStatus `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`

	StatusHistory []*ClusterOperationStatus `protobuf:"bytes,4,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`

	OperationType NodeGroupOperationMetadata_NodeGroupOperationType "" /* 181 byte string literal not displayed */

	Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`

	Labels map[string]string "" /* 153 byte string literal not displayed */

	Warnings []string `protobuf:"bytes,8,rep,name=warnings,proto3" json:"warnings,omitempty"`

}

Metadata describing the node group operation.

func (*NodeGroupOperationMetadata) Descriptor

func (*NodeGroupOperationMetadata) Descriptor() ([]byte, []int)

Deprecated: Use NodeGroupOperationMetadata.ProtoReflect.Descriptor instead.

func (*NodeGroupOperationMetadata) GetClusterUuid

func (x *NodeGroupOperationMetadata) GetClusterUuid() string

func (*NodeGroupOperationMetadata) GetDescription

func (x *NodeGroupOperationMetadata) GetDescription() string

func (*NodeGroupOperationMetadata) GetLabels

func (x *NodeGroupOperationMetadata) GetLabels() map[string]string

func (*NodeGroupOperationMetadata) GetNodeGroupId

func (x *NodeGroupOperationMetadata) GetNodeGroupId() string

func (*NodeGroupOperationMetadata) GetOperationType

func (x *NodeGroupOperationMetadata) GetOperationType() NodeGroupOperationMetadata_NodeGroupOperationType

func (*NodeGroupOperationMetadata) GetStatus

func (x *NodeGroupOperationMetadata) GetStatus() *ClusterOperationStatus

func (*NodeGroupOperationMetadata) GetStatusHistory

func (x *NodeGroupOperationMetadata) GetStatusHistory() []*ClusterOperationStatus

func (*NodeGroupOperationMetadata) GetWarnings

func (x *NodeGroupOperationMetadata) GetWarnings() []string

func (*NodeGroupOperationMetadata) ProtoMessage

func (*NodeGroupOperationMetadata) ProtoMessage()

func (*NodeGroupOperationMetadata) ProtoReflect

func (x *NodeGroupOperationMetadata) ProtoReflect() protoreflect.Message

func (*NodeGroupOperationMetadata) Reset

func (x *NodeGroupOperationMetadata) Reset()

func (*NodeGroupOperationMetadata) String

func (x *NodeGroupOperationMetadata) String() string

NodeGroupOperationMetadata_NodeGroupOperationType

type NodeGroupOperationMetadata_NodeGroupOperationType int32

Operation type for node group resources.

NodeGroupOperationMetadata_NODE_GROUP_OPERATION_TYPE_UNSPECIFIED, NodeGroupOperationMetadata_CREATE, NodeGroupOperationMetadata_UPDATE, NodeGroupOperationMetadata_DELETE, NodeGroupOperationMetadata_RESIZE

const (
	// Node group operation type is unknown.
	NodeGroupOperationMetadata_NODE_GROUP_OPERATION_TYPE_UNSPECIFIED NodeGroupOperationMetadata_NodeGroupOperationType = 0
	// Create node group operation type.
	NodeGroupOperationMetadata_CREATE NodeGroupOperationMetadata_NodeGroupOperationType = 1
	// Update node group operation type.
	NodeGroupOperationMetadata_UPDATE NodeGroupOperationMetadata_NodeGroupOperationType = 2
	// Delete node group operation type.
	NodeGroupOperationMetadata_DELETE NodeGroupOperationMetadata_NodeGroupOperationType = 3
	// Resize node group operation type.
	NodeGroupOperationMetadata_RESIZE NodeGroupOperationMetadata_NodeGroupOperationType = 4
)

func (NodeGroupOperationMetadata_NodeGroupOperationType) Descriptor

func (NodeGroupOperationMetadata_NodeGroupOperationType) Descriptor() protoreflect.EnumDescriptor

func (NodeGroupOperationMetadata_NodeGroupOperationType) Enum

func (x NodeGroupOperationMetadata_NodeGroupOperationType) Enum() *NodeGroupOperationMetadata_NodeGroupOperationType

func (NodeGroupOperationMetadata_NodeGroupOperationType) EnumDescriptor

func (NodeGroupOperationMetadata_NodeGroupOperationType) EnumDescriptor() ([]byte, []int)

Deprecated: Use NodeGroupOperationMetadata_NodeGroupOperationType.Descriptor instead.

func (NodeGroupOperationMetadata_NodeGroupOperationType) Number

func (x NodeGroupOperationMetadata_NodeGroupOperationType) Number() protoreflect.EnumNumber

func (NodeGroupOperationMetadata_NodeGroupOperationType) String

func (x NodeGroupOperationMetadata_NodeGroupOperationType) String() string

func (NodeGroupOperationMetadata_NodeGroupOperationType) Type

func (NodeGroupOperationMetadata_NodeGroupOperationType) Type() protoreflect.EnumType

NodeGroup_Role

type NodeGroup_Role int32

Node group roles.

NodeGroup_ROLE_UNSPECIFIED, NodeGroup_DRIVER

const (
	// Required unspecified role.
	NodeGroup_ROLE_UNSPECIFIED NodeGroup_Role = 0
	// Job drivers run on the node group.
	NodeGroup_DRIVER NodeGroup_Role = 1
)

func (NodeGroup_Role) Descriptor

func (NodeGroup_Role) Descriptor() protoreflect.EnumDescriptor

func (NodeGroup_Role) Enum

func (x NodeGroup_Role) Enum() *NodeGroup_Role

func (NodeGroup_Role) EnumDescriptor

func (NodeGroup_Role) EnumDescriptor() ([]byte, []int)

Deprecated: Use NodeGroup_Role.Descriptor instead.

func (NodeGroup_Role) Number

func (x NodeGroup_Role) Number() protoreflect.EnumNumber

func (NodeGroup_Role) String

func (x NodeGroup_Role) String() string

func (NodeGroup_Role) Type

func (NodeGroup_Role) Type() protoreflect.EnumType

NodeInitializationAction

type NodeInitializationAction struct {

	// Required. Cloud Storage URI of executable file.
	ExecutableFile string `protobuf:"bytes,1,opt,name=executable_file,json=executableFile,proto3" json:"executable_file,omitempty"`
	// Optional. Amount of time executable has to complete. Default is
	// 10 minutes (see JSON representation of
	// [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
	//
	// Cluster creation fails with an explanatory error message (the
	// name of the executable that caused the error and the exceeded timeout
	// period) if the executable has not completed by the end of the timeout period.
	ExecutionTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=execution_timeout,json=executionTimeout,proto3" json:"execution_timeout,omitempty"`
	// contains filtered or unexported fields
}

Specifies an executable to run on a fully configured node and a timeout period for executable completion.
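
A sketch with a non-default timeout; the script URI is a placeholder:

package main

import (
	"time"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	action := &dataprocpb.NodeInitializationAction{
		ExecutableFile:   "gs://my-bucket/scripts/init.sh", // placeholder Cloud Storage URI
		ExecutionTimeout: durationpb.New(15 * time.Minute), // override the 10-minute default
	}
	_ = action
}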

func (*NodeInitializationAction) Descriptor

func (*NodeInitializationAction) Descriptor() ([]byte, []int)

Deprecated: Use NodeInitializationAction.ProtoReflect.Descriptor instead.

func (*NodeInitializationAction) GetExecutableFile

func (x *NodeInitializationAction) GetExecutableFile() string

func (*NodeInitializationAction) GetExecutionTimeout

func (x *NodeInitializationAction) GetExecutionTimeout() *durationpb.Duration

func (*NodeInitializationAction) ProtoMessage

func (*NodeInitializationAction) ProtoMessage()

func (*NodeInitializationAction) ProtoReflect

func (x *NodeInitializationAction) ProtoReflect() protoreflect.Message

func (*NodeInitializationAction) Reset

func (x *NodeInitializationAction) Reset()

func (*NodeInitializationAction) String

func (x *NodeInitializationAction) String() string

OrderedJob

type OrderedJob struct {
	StepId string `protobuf:"bytes,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"`

	JobType isOrderedJob_JobType `protobuf_oneof:"job_type"`

	Labels map[string]string "" /* 153 byte string literal not displayed */

	Scheduling *JobScheduling `protobuf:"bytes,9,opt,name=scheduling,proto3" json:"scheduling,omitempty"`

	PrerequisiteStepIds []string `protobuf:"bytes,10,rep,name=prerequisite_step_ids,json=prerequisiteStepIds,proto3" json:"prerequisite_step_ids,omitempty"`

}

A job executed by the workflow.
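
A sketch of two workflow steps where the second runs only after the first, expressed with the JobType oneof wrappers; step IDs and URIs are placeholders:

package main

import (
	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	prepare := &dataprocpb.OrderedJob{
		StepId: "prepare", // placeholder step ID
		JobType: &dataprocpb.OrderedJob_PysparkJob{
			PysparkJob: &dataprocpb.PySparkJob{
				MainPythonFileUri: "gs://my-bucket/prepare.py", // placeholder
			},
		},
	}
	report := &dataprocpb.OrderedJob{
		StepId:              "report",
		PrerequisiteStepIds: []string{"prepare"}, // run only after "prepare" succeeds
		JobType: &dataprocpb.OrderedJob_SparkSqlJob{
			SparkSqlJob: &dataprocpb.SparkSqlJob{},
		},
	}
	_ = []*dataprocpb.OrderedJob{prepare, report}
}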

func (*OrderedJob) Descriptor

func (*OrderedJob) Descriptor() ([]byte, []int)

Deprecated: Use OrderedJob.ProtoReflect.Descriptor instead.

func (*OrderedJob) GetHadoopJob

func (x *OrderedJob) GetHadoopJob() *HadoopJob

func (*OrderedJob) GetHiveJob

func (x *OrderedJob) GetHiveJob() *HiveJob

func (*OrderedJob) GetJobType

func (m *OrderedJob) GetJobType() isOrderedJob_JobType

func (*OrderedJob) GetLabels

func (x *OrderedJob) GetLabels() map[string]string

func (*OrderedJob) GetPigJob

func (x *OrderedJob) GetPigJob() *PigJob

func (*OrderedJob) GetPrerequisiteStepIds

func (x *OrderedJob) GetPrerequisiteStepIds() []string

func (*OrderedJob) GetPrestoJob

func (x *OrderedJob) GetPrestoJob() *PrestoJob

func (*OrderedJob) GetPysparkJob

func (x *OrderedJob) GetPysparkJob() *PySparkJob

func (*OrderedJob) GetScheduling

func (x *OrderedJob) GetScheduling() *JobScheduling

func (*OrderedJob) GetSparkJob

func (x *OrderedJob) GetSparkJob() *SparkJob

func (*OrderedJob) GetSparkRJob

func (x *OrderedJob) GetSparkRJob() *SparkRJob

func (*OrderedJob) GetSparkSqlJob

func (x *OrderedJob) GetSparkSqlJob() *SparkSqlJob

func (*OrderedJob) GetStepId

func (x *OrderedJob) GetStepId() string

func (*OrderedJob) ProtoMessage

func (*OrderedJob) ProtoMessage()

func (*OrderedJob) ProtoReflect

func (x *OrderedJob) ProtoReflect() protoreflect.Message

func (*OrderedJob) Reset

func (x *OrderedJob) Reset()

func (*OrderedJob) String

func (x *OrderedJob) String() string

OrderedJob_HadoopJob

type OrderedJob_HadoopJob struct {
	// Optional. Job is a Hadoop job.
	HadoopJob *HadoopJob `protobuf:"bytes,2,opt,name=hadoop_job,json=hadoopJob,proto3,oneof"`
}

OrderedJob_HiveJob

type OrderedJob_HiveJob struct {
	// Optional. Job is a Hive job.
	HiveJob *HiveJob `protobuf:"bytes,5,opt,name=hive_job,json=hiveJob,proto3,oneof"`
}

OrderedJob_PigJob

type OrderedJob_PigJob struct {
	// Optional. Job is a Pig job.
	PigJob *PigJob `protobuf:"bytes,6,opt,name=pig_job,json=pigJob,proto3,oneof"`
}

OrderedJob_PrestoJob

type OrderedJob_PrestoJob struct {
	// Optional. Job is a Presto job.
	PrestoJob *PrestoJob `protobuf:"bytes,12,opt,name=presto_job,json=prestoJob,proto3,oneof"`
}

OrderedJob_PysparkJob

type OrderedJob_PysparkJob struct {
	// Optional. Job is a PySpark job.
	PysparkJob *PySparkJob `protobuf:"bytes,4,opt,name=pyspark_job,json=pysparkJob,proto3,oneof"`
}

OrderedJob_SparkJob

type OrderedJob_SparkJob struct {
	// Optional. Job is a Spark job.
	SparkJob *SparkJob `protobuf:"bytes,3,opt,name=spark_job,json=sparkJob,proto3,oneof"`
}

OrderedJob_SparkRJob

type OrderedJob_SparkRJob struct {
	// Optional. Job is a SparkR job.
	SparkRJob *SparkRJob `protobuf:"bytes,11,opt,name=spark_r_job,json=sparkRJob,proto3,oneof"`
}

OrderedJob_SparkSqlJob

type OrderedJob_SparkSqlJob struct {
	// Optional. Job is a SparkSql job.
	SparkSqlJob *SparkSqlJob `protobuf:"bytes,7,opt,name=spark_sql_job,json=sparkSqlJob,proto3,oneof"`
}

ParameterValidation

type ParameterValidation struct {

	// Required. The type of validation to be performed.
	//
	// Types that are assignable to ValidationType:
	//
	//	*ParameterValidation_Regex
	//	*ParameterValidation_Values
	ValidationType isParameterValidation_ValidationType `protobuf_oneof:"validation_type"`
	// contains filtered or unexported fields
}

Configuration for parameter validation.
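
A sketch of both validation flavors via the ValidationType oneof wrappers. The regex and allowed values are illustrative, and the Values field on ValueValidation is assumed from the standard proto shape:

package main

import (
	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	// Accept only zone-like values such as "us-central1-a".
	byRegex := &dataprocpb.ParameterValidation{
		ValidationType: &dataprocpb.ParameterValidation_Regex{
			Regex: &dataprocpb.RegexValidation{Regexes: []string{`[a-z]+-[a-z]+\d-[a-z]`}},
		},
	}
	// Accept only an explicit list of values (Values field assumed).
	byValues := &dataprocpb.ParameterValidation{
		ValidationType: &dataprocpb.ParameterValidation_Values{
			Values: &dataprocpb.ValueValidation{Values: []string{"small", "large"}},
		},
	}
	_, _ = byRegex, byValues
}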

func (*ParameterValidation) Descriptor

func (*ParameterValidation) Descriptor() ([]byte, []int)

Deprecated: Use ParameterValidation.ProtoReflect.Descriptor instead.

func (*ParameterValidation) GetRegex

func (x *ParameterValidation) GetRegex() *RegexValidation

func (*ParameterValidation) GetValidationType

func (m *ParameterValidation) GetValidationType() isParameterValidation_ValidationType

func (*ParameterValidation) GetValues

func (x *ParameterValidation) GetValues() *ValueValidation

func (*ParameterValidation) ProtoMessage

func (*ParameterValidation) ProtoMessage()

func (*ParameterValidation) ProtoReflect

func (x *ParameterValidation) ProtoReflect() protoreflect.Message

func (*ParameterValidation) Reset

func (x *ParameterValidation) Reset()

func (*ParameterValidation) String

func (x *ParameterValidation) String() string

ParameterValidation_Regex

type ParameterValidation_Regex struct {
	// Validation based on regular expressions.
	Regex *RegexValidation `protobuf:"bytes,1,opt,name=regex,proto3,oneof"`
}

ParameterValidation_Values

type ParameterValidation_Values struct {
	// Validation based on a list of allowed values.
	Values *ValueValidation `protobuf:"bytes,2,opt,name=values,proto3,oneof"`
}

PeripheralsConfig

type PeripheralsConfig struct {
	MetastoreService string `protobuf:"bytes,1,opt,name=metastore_service,json=metastoreService,proto3" json:"metastore_service,omitempty"`

	SparkHistoryServerConfig *SparkHistoryServerConfig "" /* 137 byte string literal not displayed */

}

Auxiliary services configuration for a workload.

func (*PeripheralsConfig) Descriptor

func (*PeripheralsConfig) Descriptor() ([]byte, []int)

Deprecated: Use PeripheralsConfig.ProtoReflect.Descriptor instead.

func (*PeripheralsConfig) GetMetastoreService

func (x *PeripheralsConfig) GetMetastoreService() string

func (*PeripheralsConfig) GetSparkHistoryServerConfig

func (x *PeripheralsConfig) GetSparkHistoryServerConfig() *SparkHistoryServerConfig

func (*PeripheralsConfig) ProtoMessage

func (*PeripheralsConfig) ProtoMessage()

func (*PeripheralsConfig) ProtoReflect

func (x *PeripheralsConfig) ProtoReflect() protoreflect.Message

func (*PeripheralsConfig) Reset

func (x *PeripheralsConfig) Reset()

func (*PeripheralsConfig) String

func (x *PeripheralsConfig) String() string

PigJob

type PigJob struct {
	Queries isPigJob_Queries `protobuf_oneof:"queries"`

	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`

	ScriptVariables map[string]string "" /* 194 byte string literal not displayed */

	Properties map[string]string "" /* 161 byte string literal not displayed */

	JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`

	LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`

}

A Dataproc job for running Apache Pig queries on YARN.

func (*PigJob) Descriptor

func (*PigJob) Descriptor() ([]byte, []int)

Deprecated: Use PigJob.ProtoReflect.Descriptor instead.

func (*PigJob) GetContinueOnFailure

func (x *PigJob) GetContinueOnFailure() bool

func (*PigJob) GetJarFileUris

func (x *PigJob) GetJarFileUris() []string

func (*PigJob) GetLoggingConfig

func (x *PigJob) GetLoggingConfig() *LoggingConfig

func (*PigJob) GetProperties

func (x *PigJob) GetProperties() map[string]string

func (*PigJob) GetQueries

func (m *PigJob) GetQueries() isPigJob_Queries

func (*PigJob) GetQueryFileUri

func (x *PigJob) GetQueryFileUri() string

func (*PigJob) GetQueryList

func (x *PigJob) GetQueryList() *QueryList

func (*PigJob) GetScriptVariables

func (x *PigJob) GetScriptVariables() map[string]string

func (*PigJob) ProtoMessage

func (*PigJob) ProtoMessage()

func (*PigJob) ProtoReflect

func (x *PigJob) ProtoReflect() protoreflect.Message

func (*PigJob) Reset

func (x *PigJob) Reset()

func (*PigJob) String

func (x *PigJob) String() string

PigJob_QueryFileUri

type PigJob_QueryFileUri struct {
	// The HCFS URI of the script that contains the Pig queries.
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

PigJob_QueryList

type PigJob_QueryList struct {
	// A list of queries.
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

PrestoJob

type PrestoJob struct {
	Queries isPrestoJob_Queries `protobuf_oneof:"queries"`

	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`

	OutputFormat string `protobuf:"bytes,4,opt,name=output_format,json=outputFormat,proto3" json:"output_format,omitempty"`

	ClientTags []string `protobuf:"bytes,5,rep,name=client_tags,json=clientTags,proto3" json:"client_tags,omitempty"`

	Properties map[string]string "" /* 161 byte string literal not displayed */

	LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`

}

A Dataproc job for running Presto queries. IMPORTANT: The Dataproc Presto Optional Component must be enabled when the cluster is created to submit a Presto job to the cluster.

func (*PrestoJob) Descriptor

func (*PrestoJob) Descriptor() ([]byte, []int)

Deprecated: Use PrestoJob.ProtoReflect.Descriptor instead.

func (*PrestoJob) GetClientTags

func (x *PrestoJob) GetClientTags() []string

func (*PrestoJob) GetContinueOnFailure

func (x *PrestoJob) GetContinueOnFailure() bool

func (*PrestoJob) GetLoggingConfig

func (x *PrestoJob) GetLoggingConfig() *LoggingConfig

func (*PrestoJob) GetOutputFormat

func (x *PrestoJob) GetOutputFormat() string

func (*PrestoJob) GetProperties

func (x *PrestoJob) GetProperties() map[string]string

func (*PrestoJob) GetQueries

func (m *PrestoJob) GetQueries() isPrestoJob_Queries

func (*PrestoJob) GetQueryFileUri

func (x *PrestoJob) GetQueryFileUri() string

func (*PrestoJob) GetQueryList

func (x *PrestoJob) GetQueryList() *QueryList

func (*PrestoJob) ProtoMessage

func (*PrestoJob) ProtoMessage()

func (*PrestoJob) ProtoReflect

func (x *PrestoJob) ProtoReflect() protoreflect.Message

func (*PrestoJob) Reset

func (x *PrestoJob) Reset()

func (*PrestoJob) String

func (x *PrestoJob) String() string

PrestoJob_QueryFileUri

type PrestoJob_QueryFileUri struct {
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

PrestoJob_QueryList

type PrestoJob_QueryList struct {
	// A list of queries.
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

PySparkBatch

type PySparkBatch struct {

	// Required. The HCFS URI of the main Python file to use as the Spark driver. Must
	// be a .py file.
	MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri,proto3" json:"main_python_file_uri,omitempty"`
	// Optional. The arguments to pass to the driver. Do not include arguments
	// that can be set as batch properties, such as `--conf`, since a collision
	// can occur that causes an incorrect batch submission.
	Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark
	// framework. Supported file types: `.py`, `.egg`, and `.zip`.
	PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris,proto3" json:"python_file_uris,omitempty"`
	// Optional. HCFS URIs of jar files to add to the classpath of the
	// Spark driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. HCFS URIs of files to be placed in the working directory of
	// each executor.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted into the working directory
	// of each executor. Supported file types:
	// `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// contains filtered or unexported fields
}

A configuration for running an Apache PySpark batch workload.
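
A sketch of a PySpark batch payload; all URIs and arguments are placeholders:

package main

import (
	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	batch := &dataprocpb.PySparkBatch{
		MainPythonFileUri: "gs://my-bucket/jobs/etl.py",        // placeholder; must be a .py file
		Args:              []string{"--date=2023-01-01"},       // avoid flags like --conf (see above)
		PythonFileUris:    []string{"gs://my-bucket/libs.zip"}, // placeholder dependency archive
	}
	_ = batch
}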

func (*PySparkBatch) Descriptor

func (*PySparkBatch) Descriptor() ([]byte, []int)

Deprecated: Use PySparkBatch.ProtoReflect.Descriptor instead.

func (*PySparkBatch) GetArchiveUris

func (x *PySparkBatch) GetArchiveUris() []string

func (*PySparkBatch) GetArgs

func (x *PySparkBatch) GetArgs() []string

func (*PySparkBatch) GetFileUris

func (x *PySparkBatch) GetFileUris() []string

func (*PySparkBatch) GetJarFileUris

func (x *PySparkBatch) GetJarFileUris() []string

func (*PySparkBatch) GetMainPythonFileUri

func (x *PySparkBatch) GetMainPythonFileUri() string

func (*PySparkBatch) GetPythonFileUris

func (x *PySparkBatch) GetPythonFileUris() []string

func (*PySparkBatch) ProtoMessage

func (*PySparkBatch) ProtoMessage()

func (*PySparkBatch) ProtoReflect

func (x *PySparkBatch) ProtoReflect() protoreflect.Message

func (*PySparkBatch) Reset

func (x *PySparkBatch) Reset()

func (*PySparkBatch) String

func (x *PySparkBatch) String() string

PySparkJob

type PySparkJob struct {
	MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri,proto3" json:"main_python_file_uri,omitempty"`

	Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`

	PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris,proto3" json:"python_file_uris,omitempty"`

	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`

	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`

	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`

	Properties map[string]string "" /* 161 byte string literal not displayed */

	LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`

}

A Dataproc job for running Apache PySpark applications on YARN.

func (*PySparkJob) Descriptor

func (*PySparkJob) Descriptor() ([]byte, []int)

Deprecated: Use PySparkJob.ProtoReflect.Descriptor instead.

func (*PySparkJob) GetArchiveUris

func (x *PySparkJob) GetArchiveUris() []string

func (*PySparkJob) GetArgs

func (x *PySparkJob) GetArgs() []string

func (*PySparkJob) GetFileUris

func (x *PySparkJob) GetFileUris() []string

func (*PySparkJob) GetJarFileUris

func (x *PySparkJob) GetJarFileUris() []string

func (*PySparkJob) GetLoggingConfig

func (x *PySparkJob) GetLoggingConfig() *LoggingConfig

func (*PySparkJob) GetMainPythonFileUri

func (x *PySparkJob) GetMainPythonFileUri() string

func (*PySparkJob) GetProperties

func (x *PySparkJob) GetProperties() map[string]string

func (*PySparkJob) GetPythonFileUris

func (x *PySparkJob) GetPythonFileUris() []string

func (*PySparkJob) ProtoMessage

func (*PySparkJob) ProtoMessage()

func (*PySparkJob) ProtoReflect

func (x *PySparkJob) ProtoReflect() protoreflect.Message

func (*PySparkJob) Reset

func (x *PySparkJob) Reset()

func (*PySparkJob) String

func (x *PySparkJob) String() string

QueryList

type QueryList struct {

	// Required. The queries to execute. You do not need to end a query expression
	// with a semicolon. Multiple queries can be specified in one
	// string by separating each with a semicolon. Here is an example of a
	// Dataproc API snippet that uses a QueryList to specify a HiveJob:
	//
	//	"hiveJob": {
	//	  "queryList": {
	//	    "queries": [
	//	      "query1",
	//	      "query2",
	//	      "query3;query4",
	//	    ]
	//	  }
	//	}
	Queries []string `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
	// contains filtered or unexported fields
}

A list of queries to run on a cluster.
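
The same QueryList expressed in Go, here attached to a PigJob through its Queries oneof (see PigJob_QueryList):

package main

import (
	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	job := &dataprocpb.PigJob{
		Queries: &dataprocpb.PigJob_QueryList{
			QueryList: &dataprocpb.QueryList{
				Queries: []string{"query1", "query2", "query3;query4"},
			},
		},
	}
	_ = job
}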

func (*QueryList) Descriptor

func (*QueryList) Descriptor() ([]byte, []int)

Deprecated: Use QueryList.ProtoReflect.Descriptor instead.

func (*QueryList) GetQueries

func (x *QueryList) GetQueries() []string

func (*QueryList) ProtoMessage

func (*QueryList) ProtoMessage()

func (*QueryList) ProtoReflect

func (x *QueryList) ProtoReflect() protoreflect.Message

func (*QueryList) Reset

func (x *QueryList) Reset()

func (*QueryList) String

func (x *QueryList) String() string

RegexValidation

type RegexValidation struct {

	// Required. RE2 regular expressions used to validate the parameter's value.
	// The value must match the regex in its entirety (substring
	// matches are not sufficient).
	Regexes []string `protobuf:"bytes,1,rep,name=regexes,proto3" json:"regexes,omitempty"`
	// contains filtered or unexported fields
}

Validation based on regular expressions.

func (*RegexValidation) Descriptor

func (*RegexValidation) Descriptor() ([]byte, []int)

Deprecated: Use RegexValidation.ProtoReflect.Descriptor instead.

func (*RegexValidation) GetRegexes

func (x *RegexValidation) GetRegexes() []string

func (*RegexValidation) ProtoMessage

func (*RegexValidation) ProtoMessage()

func (*RegexValidation) ProtoReflect

func (x *RegexValidation) ProtoReflect() protoreflect.Message

func (*RegexValidation) Reset

func (x *RegexValidation) Reset()

func (*RegexValidation) String

func (x *RegexValidation) String() string

ReservationAffinity

type ReservationAffinity struct {
	ConsumeReservationType ReservationAffinity_Type "" /* 185 byte string literal not displayed */

	Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`

	Values []string `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"`

}

Reservation Affinity for consuming Zonal reservation.

func (*ReservationAffinity) Descriptor

func (*ReservationAffinity) Descriptor() ([]byte, []int)

Deprecated: Use ReservationAffinity.ProtoReflect.Descriptor instead.

func (*ReservationAffinity) GetConsumeReservationType

func (x *ReservationAffinity) GetConsumeReservationType() ReservationAffinity_Type

func (*ReservationAffinity) GetKey

func (x *ReservationAffinity) GetKey() string

func (*ReservationAffinity) GetValues

func (x *ReservationAffinity) GetValues() []string

func (*ReservationAffinity) ProtoMessage

func (*ReservationAffinity) ProtoMessage()

func (*ReservationAffinity) ProtoReflect

func (x *ReservationAffinity) ProtoReflect() protoreflect.Message

func (*ReservationAffinity) Reset

func (x *ReservationAffinity) Reset()

func (*ReservationAffinity) String

func (x *ReservationAffinity) String() string

ReservationAffinity_Type

type ReservationAffinity_Type int32

Indicates whether to consume capacity from a reservation.

ReservationAffinity_TYPE_UNSPECIFIED, ReservationAffinity_NO_RESERVATION, ReservationAffinity_ANY_RESERVATION, ReservationAffinity_SPECIFIC_RESERVATION

const (
	ReservationAffinity_TYPE_UNSPECIFIED ReservationAffinity_Type = 0
	// Do not consume from any allocated capacity.
	ReservationAffinity_NO_RESERVATION ReservationAffinity_Type = 1
	// Consume any reservation available.
	ReservationAffinity_ANY_RESERVATION ReservationAffinity_Type = 2
	// Must consume from a specific reservation. The key and values fields
	// must be specified to identify the reservation.
	ReservationAffinity_SPECIFIC_RESERVATION ReservationAffinity_Type = 3
)
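
A sketch of targeting one named reservation; the key shown is the Compute Engine reservation-name key commonly used with SPECIFIC_RESERVATION, and the reservation name is a placeholder:

package main

import (
	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func main() {
	affinity := &dataprocpb.ReservationAffinity{
		ConsumeReservationType: dataprocpb.ReservationAffinity_SPECIFIC_RESERVATION,
		Key:                    "compute.googleapis.com/reservation-name", // key for specific reservations (assumed)
		Values:                 []string{"my-reservation"},                // placeholder reservation name
	}
	_ = affinity
}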

func (ReservationAffinity_Type) Descriptor

func (ReservationAffinity_Type) Descriptor() protoreflect.EnumDescriptor

func (ReservationAffinity_Type) Enum

func (x ReservationAffinity_Type) Enum() *ReservationAffinity_Type

func (ReservationAffinity_Type) EnumDescriptor

func (ReservationAffinity_Type) EnumDescriptor() ([]byte, []int)

Deprecated: Use ReservationAffinity_Type.Descriptor instead.

func (ReservationAffinity_Type) Number

func (x ReservationAffinity_Type) Number() protoreflect.EnumNumber

func (ReservationAffinity_Type) String

func (x ReservationAffinity_Type) String() string

func (ReservationAffinity_Type) Type

func (ReservationAffinity_Type) Type() protoreflect.EnumType

ResizeNodeGroupRequest

type ResizeNodeGroupRequest struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`

	Size int32 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`

	RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`

	GracefulDecommissionTimeout *durationpb.Duration "" /* 144 byte string literal not displayed */

}

A request to resize a node group.
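
A sketch of a resize with graceful decommissioning; the resource name, size, and request ID are placeholders:

package main

import (
	"time"

	"cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	req := &dataprocpb.ResizeNodeGroupRequest{
		// Placeholder node group resource name.
		Name:                        "projects/my-project/regions/us-central1/clusters/my-cluster/nodeGroups/my-node-group",
		Size:                        5,                                // target node count
		GracefulDecommissionTimeout: durationpb.New(10 * time.Minute), // drain work before removing nodes
		RequestId:                   "7c8e3f1a-resize-1",              // placeholder idempotency token
	}
	_ = req
}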

func (*ResizeNodeGroupRequest) Descriptor

func (*ResizeNodeGroupRequest) Descriptor() ([]byte, []int)

Deprecated: Use ResizeNodeGroupRequest.ProtoReflect.Descriptor instead.

func (*ResizeNodeGroupRequest) GetGracefulDecommissionTimeout

func (x *ResizeNodeGroupRequest) GetGracefulDecommissionTimeout() *durationpb.Duration

func (*ResizeNodeGroupRequest) GetName

func (x *ResizeNodeGroupRequest) GetName() string

func (*ResizeNodeGroupRequest) GetRequestId

func (x *ResizeNodeGroupRequest) GetRequestId() string

func (*ResizeNodeGroupRequest) GetSize

func (x *ResizeNodeGroupRequest) GetSize() int32

func (*ResizeNodeGroupRequest) ProtoMessage

func (*ResizeNodeGroupRequest) ProtoMessage()

func (*ResizeNodeGroupRequest) ProtoReflect

func (x *ResizeNodeGroupRequest) ProtoReflect() protoreflect.Message

func (*ResizeNodeGroupRequest) Reset

func (x *ResizeNodeGroupRequest) Reset()

func (*ResizeNodeGroupRequest) String

func (x *ResizeNodeGroupRequest) String() string

RuntimeConfig

type RuntimeConfig struct {
	Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`

	ContainerImage string `protobuf:"bytes,2,opt,name=container_image,json=containerImage,proto3" json:"container_image,omitempty"`

	Properties map[string]string "" /* 161 byte string literal not displayed */

}

Runtime configuration for a workload.
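
A minimal sketch of a batch runtime configuration, assuming the dataprocpb import alias; the runtime version and the Spark property keys are illustrative.

package example

import dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"

// batchRuntime pins the serverless runtime version and tunes Spark through
// the Properties map.
func batchRuntime() *dataprocpb.RuntimeConfig {
	return &dataprocpb.RuntimeConfig{
		Version: "2.0",
		Properties: map[string]string{
			"spark.executor.instances": "4",
			"spark.executor.memory":    "4g",
		},
	}
}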

func (*RuntimeConfig) Descriptor

func (*RuntimeConfig) Descriptor() ([]byte, []int)

Deprecated: Use RuntimeConfig.ProtoReflect.Descriptor instead.

func (*RuntimeConfig) GetContainerImage

func (x *RuntimeConfig) GetContainerImage() string

func (*RuntimeConfig) GetProperties

func (x *RuntimeConfig) GetProperties() map[string]string

func (*RuntimeConfig) GetVersion

func (x *RuntimeConfig) GetVersion() string

func (*RuntimeConfig) ProtoMessage

func (*RuntimeConfig) ProtoMessage()

func (*RuntimeConfig) ProtoReflect

func (x *RuntimeConfig) ProtoReflect() protoreflect.Message

func (*RuntimeConfig) Reset

func (x *RuntimeConfig) Reset()

func (*RuntimeConfig) String

func (x *RuntimeConfig) String() string

RuntimeInfo

type RuntimeInfo struct {
	Endpoints map[string]string "" /* 159 byte string literal not displayed */

	OutputUri string `protobuf:"bytes,2,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`

	DiagnosticOutputUri string `protobuf:"bytes,3,opt,name=diagnostic_output_uri,json=diagnosticOutputUri,proto3" json:"diagnostic_output_uri,omitempty"`

}

Runtime information about workload execution.

func (*RuntimeInfo) Descriptor

func (*RuntimeInfo) Descriptor() ([]byte, []int)

Deprecated: Use RuntimeInfo.ProtoReflect.Descriptor instead.

func (*RuntimeInfo) GetDiagnosticOutputUri

func (x *RuntimeInfo) GetDiagnosticOutputUri() string

func (*RuntimeInfo) GetEndpoints

func (x *RuntimeInfo) GetEndpoints() map[string]string

func (*RuntimeInfo) GetOutputUri

func (x *RuntimeInfo) GetOutputUri() string

func (*RuntimeInfo) ProtoMessage

func (*RuntimeInfo) ProtoMessage()

func (*RuntimeInfo) ProtoReflect

func (x *RuntimeInfo) ProtoReflect() protoreflect.Message

func (*RuntimeInfo) Reset

func (x *RuntimeInfo) Reset()

func (*RuntimeInfo) String

func (x *RuntimeInfo) String() string

SecurityConfig

type SecurityConfig struct {

	// Optional. Kerberos related configuration.
	KerberosConfig *KerberosConfig `protobuf:"bytes,1,opt,name=kerberos_config,json=kerberosConfig,proto3" json:"kerberos_config,omitempty"`
	// Optional. Identity related configuration, including service account based
	// secure multi-tenancy user mappings.
	IdentityConfig *IdentityConfig `protobuf:"bytes,2,opt,name=identity_config,json=identityConfig,proto3" json:"identity_config,omitempty"`
	// contains filtered or unexported fields
}

Security related configuration, including encryption, Kerberos, etc.

func (*SecurityConfig) Descriptor

func (*SecurityConfig) Descriptor() ([]byte, []int)

Deprecated: Use SecurityConfig.ProtoReflect.Descriptor instead.

func (*SecurityConfig) GetIdentityConfig

func (x *SecurityConfig) GetIdentityConfig() *IdentityConfig

func (*SecurityConfig) GetKerberosConfig

func (x *SecurityConfig) GetKerberosConfig() *KerberosConfig

func (*SecurityConfig) ProtoMessage

func (*SecurityConfig) ProtoMessage()

func (*SecurityConfig) ProtoReflect

func (x *SecurityConfig) ProtoReflect() protoreflect.Message

func (*SecurityConfig) Reset

func (x *SecurityConfig) Reset()

func (*SecurityConfig) String

func (x *SecurityConfig) String() string

ShieldedInstanceConfig

type ShieldedInstanceConfig struct {
	EnableSecureBoot bool `protobuf:"varint,1,opt,name=enable_secure_boot,json=enableSecureBoot,proto3" json:"enable_secure_boot,omitempty"`

	EnableVtpm bool `protobuf:"varint,2,opt,name=enable_vtpm,json=enableVtpm,proto3" json:"enable_vtpm,omitempty"`

	EnableIntegrityMonitoring bool "" /* 139 byte string literal not displayed */

}

Shielded Instance Config for clusters using Compute Engine Shielded VMs.

func (*ShieldedInstanceConfig) Descriptor

func (*ShieldedInstanceConfig) Descriptor() ([]byte, []int)

Deprecated: Use ShieldedInstanceConfig.ProtoReflect.Descriptor instead.

func (*ShieldedInstanceConfig) GetEnableIntegrityMonitoring

func (x *ShieldedInstanceConfig) GetEnableIntegrityMonitoring() bool

func (*ShieldedInstanceConfig) GetEnableSecureBoot

func (x *ShieldedInstanceConfig) GetEnableSecureBoot() bool

func (*ShieldedInstanceConfig) GetEnableVtpm

func (x *ShieldedInstanceConfig) GetEnableVtpm() bool

func (*ShieldedInstanceConfig) ProtoMessage

func (*ShieldedInstanceConfig) ProtoMessage()

func (*ShieldedInstanceConfig) ProtoReflect

func (x *ShieldedInstanceConfig) ProtoReflect() protoreflect.Message

func (*ShieldedInstanceConfig) Reset

func (x *ShieldedInstanceConfig) Reset()

func (*ShieldedInstanceConfig) String

func (x *ShieldedInstanceConfig) String() string

SoftwareConfig

type SoftwareConfig struct {
	ImageVersion string `protobuf:"bytes,1,opt,name=image_version,json=imageVersion,proto3" json:"image_version,omitempty"`

	Properties map[string]string "" /* 161 byte string literal not displayed */

	OptionalComponents []Component "" /* 163 byte string literal not displayed */

}

Specifies the selection and config of software inside the cluster.
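
A sketch assuming the dataprocpb import alias and the Component enum values defined in this package; the image version and the prefix:property key are illustrative.

package example

import dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"

// clusterSoftware selects an image version, enables optional components,
// and overrides one cluster property.
func clusterSoftware() *dataprocpb.SoftwareConfig {
	return &dataprocpb.SoftwareConfig{
		ImageVersion: "2.1-debian11",
		OptionalComponents: []dataprocpb.Component{
			dataprocpb.Component_JUPYTER,
			dataprocpb.Component_ZEPPELIN,
		},
		Properties: map[string]string{
			"spark:spark.executor.memory": "4g", // prefix:property format
		},
	}
}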

func (*SoftwareConfig) Descriptor

func (*SoftwareConfig) Descriptor() ([]byte, []int)

Deprecated: Use SoftwareConfig.ProtoReflect.Descriptor instead.

func (*SoftwareConfig) GetImageVersion

func (x *SoftwareConfig) GetImageVersion() string

func (*SoftwareConfig) GetOptionalComponents

func (x *SoftwareConfig) GetOptionalComponents() []Component

func (*SoftwareConfig) GetProperties

func (x *SoftwareConfig) GetProperties() map[string]string

func (*SoftwareConfig) ProtoMessage

func (*SoftwareConfig) ProtoMessage()

func (*SoftwareConfig) ProtoReflect

func (x *SoftwareConfig) ProtoReflect() protoreflect.Message

func (*SoftwareConfig) Reset

func (x *SoftwareConfig) Reset()

func (*SoftwareConfig) String

func (x *SoftwareConfig) String() string

SparkBatch

type SparkBatch struct {

	// The specification of the main method to call to drive the Spark
	// workload. Specify either the jar file that contains the main class or the
	// main class name. To pass both a main jar and a main class in that jar, add
	// the jar to `jar_file_uris`, and then specify the main class
	// name in `main_class`.
	//
	// Types that are assignable to Driver:
	//
	//	*SparkBatch_MainJarFileUri
	//	*SparkBatch_MainClass
	Driver isSparkBatch_Driver `protobuf_oneof:"driver"`
	// Optional. The arguments to pass to the driver. Do not include arguments
	// that can be set as batch properties, such as `--conf`, since a collision
	// can occur that causes an incorrect batch submission.
	Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. HCFS URIs of jar files to add to the classpath of the
	// Spark driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. HCFS URIs of files to be placed in the working directory of
	// each executor.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted into the working directory
	// of each executor. Supported file types:
	// `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// contains filtered or unexported fields
}

A configuration for running an Apache Spark batch workload.
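
A sketch showing the Driver oneof populated through the SparkBatch_MainClass wrapper, assuming the dataprocpb import alias; bucket paths and class names are placeholders.

package example

import dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"

// sparkBatch drives the workload by main class; the jar that provides the
// class is added to JarFileUris rather than set as the main jar, per the
// Driver field comment above.
func sparkBatch() *dataprocpb.SparkBatch {
	return &dataprocpb.SparkBatch{
		Driver: &dataprocpb.SparkBatch_MainClass{
			MainClass: "org.example.WordCount",
		},
		JarFileUris: []string{"gs://my-bucket/wordcount.jar"},
		Args:        []string{"gs://my-bucket/input", "gs://my-bucket/output"},
	}
}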

func (*SparkBatch) Descriptor

func (*SparkBatch) Descriptor() ([]byte, []int)

Deprecated: Use SparkBatch.ProtoReflect.Descriptor instead.

func (*SparkBatch) GetArchiveUris

func (x *SparkBatch) GetArchiveUris() []string

func (*SparkBatch) GetArgs

func (x *SparkBatch) GetArgs() []string

func (*SparkBatch) GetDriver

func (m *SparkBatch) GetDriver() isSparkBatch_Driver

func (*SparkBatch) GetFileUris

func (x *SparkBatch) GetFileUris() []string

func (*SparkBatch) GetJarFileUris

func (x *SparkBatch) GetJarFileUris() []string

func (*SparkBatch) GetMainClass

func (x *SparkBatch) GetMainClass() string

func (*SparkBatch) GetMainJarFileUri

func (x *SparkBatch) GetMainJarFileUri() string

func (*SparkBatch) ProtoMessage

func (*SparkBatch) ProtoMessage()

func (*SparkBatch) ProtoReflect

func (x *SparkBatch) ProtoReflect() protoreflect.Message

func (*SparkBatch) Reset

func (x *SparkBatch) Reset()

func (*SparkBatch) String

func (x *SparkBatch) String() string

SparkBatch_MainClass

type SparkBatch_MainClass struct {
	// Optional. The name of the driver main class. The jar file that contains the class
	// must be in the classpath or specified in `jar_file_uris`.
	MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}

SparkBatch_MainJarFileUri

type SparkBatch_MainJarFileUri struct {
	// Optional. The HCFS URI of the jar file that contains the main class.
	MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}

SparkHistoryServerConfig

type SparkHistoryServerConfig struct {

	// Optional. Resource name of an existing Dataproc Cluster to act as a Spark History
	// Server for the workload.
	//
	// Example:
	//
	// * `projects/[project_id]/regions/[region]/clusters/[cluster_name]`
	DataprocCluster string `protobuf:"bytes,1,opt,name=dataproc_cluster,json=dataprocCluster,proto3" json:"dataproc_cluster,omitempty"`
	// contains filtered or unexported fields
}

Spark History Server configuration for the workload.

func (*SparkHistoryServerConfig) Descriptor

func (*SparkHistoryServerConfig) Descriptor() ([]byte, []int)

Deprecated: Use SparkHistoryServerConfig.ProtoReflect.Descriptor instead.

func (*SparkHistoryServerConfig) GetDataprocCluster

func (x *SparkHistoryServerConfig) GetDataprocCluster() string

func (*SparkHistoryServerConfig) ProtoMessage

func (*SparkHistoryServerConfig) ProtoMessage()

func (*SparkHistoryServerConfig) ProtoReflect

func (x *SparkHistoryServerConfig) ProtoReflect() protoreflect.Message

func (*SparkHistoryServerConfig) Reset

func (x *SparkHistoryServerConfig) Reset()

func (*SparkHistoryServerConfig) String

func (x *SparkHistoryServerConfig) String() string

SparkJob

type SparkJob struct {
	Driver isSparkJob_Driver `protobuf_oneof:"driver"`

	Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`

	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`

	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`

	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`

	Properties map[string]string "" /* 161 byte string literal not displayed */

	LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`

}

A Dataproc job for running Apache Spark applications on YARN.

func (*SparkJob) Descriptor

func (*SparkJob) Descriptor() ([]byte, []int)

Deprecated: Use SparkJob.ProtoReflect.Descriptor instead.

func (*SparkJob) GetArchiveUris

func (x *SparkJob) GetArchiveUris() []string

func (*SparkJob) GetArgs

func (x *SparkJob) GetArgs() []string

func (*SparkJob) GetDriver

func (m *SparkJob) GetDriver() isSparkJob_Driver

func (*SparkJob) GetFileUris

func (x *SparkJob) GetFileUris() []string

func (*SparkJob) GetJarFileUris

func (x *SparkJob) GetJarFileUris() []string

func (*SparkJob) GetLoggingConfig

func (x *SparkJob) GetLoggingConfig() *LoggingConfig

func (*SparkJob) GetMainClass

func (x *SparkJob) GetMainClass() string

func (*SparkJob) GetMainJarFileUri

func (x *SparkJob) GetMainJarFileUri() string

func (*SparkJob) GetProperties

func (x *SparkJob) GetProperties() map[string]string

func (*SparkJob) ProtoMessage

func (*SparkJob) ProtoMessage()

func (*SparkJob) ProtoReflect

func (x *SparkJob) ProtoReflect() protoreflect.Message

func (*SparkJob) Reset

func (x *SparkJob) Reset()

func (*SparkJob) String

func (x *SparkJob) String() string

SparkJob_MainClass

type SparkJob_MainClass struct {
	// The name of the driver's main class. The jar file that contains the class
	// must be in the default CLASSPATH or specified in `jar_file_uris`.
	MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}

SparkJob_MainJarFileUri

type SparkJob_MainJarFileUri struct {
	// The HCFS URI of the jar file that contains the main class.
	MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}

SparkRBatch

type SparkRBatch struct {

	// Required. The HCFS URI of the main R file to use as the driver.
	// Must be a `.R` or `.r` file.
	MainRFileUri string `protobuf:"bytes,1,opt,name=main_r_file_uri,json=mainRFileUri,proto3" json:"main_r_file_uri,omitempty"`
	// Optional. The arguments to pass to the Spark driver. Do not include arguments
	// that can be set as batch properties, such as `--conf`, since a collision
	// can occur that causes an incorrect batch submission.
	Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. HCFS URIs of files to be placed in the working directory of
	// each executor.
	FileUris []string `protobuf:"bytes,3,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted into the working directory
	// of each executor. Supported file types:
	// `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
	ArchiveUris []string `protobuf:"bytes,4,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// contains filtered or unexported fields
}

A configuration for running an Apache SparkR batch workload.

func (*SparkRBatch) Descriptor

func (*SparkRBatch) Descriptor() ([]byte, []int)

Deprecated: Use SparkRBatch.ProtoReflect.Descriptor instead.

func (*SparkRBatch) GetArchiveUris

func (x *SparkRBatch) GetArchiveUris() []string

func (*SparkRBatch) GetArgs

func (x *SparkRBatch) GetArgs() []string

func (*SparkRBatch) GetFileUris

func (x *SparkRBatch) GetFileUris() []string

func (*SparkRBatch) GetMainRFileUri

func (x *SparkRBatch) GetMainRFileUri() string

func (*SparkRBatch) ProtoMessage

func (*SparkRBatch) ProtoMessage()

func (*SparkRBatch) ProtoReflect

func (x *SparkRBatch) ProtoReflect() protoreflect.Message

func (*SparkRBatch) Reset

func (x *SparkRBatch) Reset()

func (*SparkRBatch) String

func (x *SparkRBatch) String() string

SparkRJob

type SparkRJob struct {
	MainRFileUri string `protobuf:"bytes,1,opt,name=main_r_file_uri,json=mainRFileUri,proto3" json:"main_r_file_uri,omitempty"`

	Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`

	FileUris []string `protobuf:"bytes,3,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`

	ArchiveUris []string `protobuf:"bytes,4,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`

	Properties map[string]string "" /* 161 byte string literal not displayed */

	LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`

}

A Dataproc job for running Apache SparkR applications on YARN.

func (*SparkRJob) Descriptor

func (*SparkRJob) Descriptor() ([]byte, []int)

Deprecated: Use SparkRJob.ProtoReflect.Descriptor instead.

func (*SparkRJob) GetArchiveUris

func (x *SparkRJob) GetArchiveUris() []string

func (*SparkRJob) GetArgs

func (x *SparkRJob) GetArgs() []string

func (*SparkRJob) GetFileUris

func (x *SparkRJob) GetFileUris() []string

func (*SparkRJob) GetLoggingConfig

func (x *SparkRJob) GetLoggingConfig() *LoggingConfig

func (*SparkRJob) GetMainRFileUri

func (x *SparkRJob) GetMainRFileUri() string

func (*SparkRJob) GetProperties

func (x *SparkRJob) GetProperties() map[string]string

func (*SparkRJob) ProtoMessage

func (*SparkRJob) ProtoMessage()

func (*SparkRJob) ProtoReflect

func (x *SparkRJob) ProtoReflect() protoreflect.Message

func (*SparkRJob) Reset

func (x *SparkRJob) Reset()

func (*SparkRJob) String

func (x *SparkRJob) String() string

SparkSqlBatch

type SparkSqlBatch struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3" json:"query_file_uri,omitempty"`

	QueryVariables map[string]string "" /* 191 byte string literal not displayed */

	JarFileUris []string `protobuf:"bytes,3,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`

}

A configuration for running Apache Spark SQL queries as a batch workload.
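
A sketch assuming the dataprocpb import alias; the script URI, variable name, and jar path are placeholders, and the variable substitution follows Spark SQL's SET command semantics.

package example

import dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"

// sparkSQLBatch runs a script from Cloud Storage with the query variable
// env set to "prod".
func sparkSQLBatch() *dataprocpb.SparkSqlBatch {
	return &dataprocpb.SparkSqlBatch{
		QueryFileUri:   "gs://my-bucket/queries/report.sql",
		QueryVariables: map[string]string{"env": "prod"},
		JarFileUris:    []string{"gs://my-bucket/udfs.jar"},
	}
}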

func (*SparkSqlBatch) Descriptor

func (*SparkSqlBatch) Descriptor() ([]byte, []int)

Deprecated: Use SparkSqlBatch.ProtoReflect.Descriptor instead.

func (*SparkSqlBatch) GetJarFileUris

func (x *SparkSqlBatch) GetJarFileUris() []string

func (*SparkSqlBatch) GetQueryFileUri

func (x *SparkSqlBatch) GetQueryFileUri() string

func (*SparkSqlBatch) GetQueryVariables

func (x *SparkSqlBatch) GetQueryVariables() map[string]string

func (*SparkSqlBatch) ProtoMessage

func (*SparkSqlBatch) ProtoMessage()

func (*SparkSqlBatch) ProtoReflect

func (x *SparkSqlBatch) ProtoReflect() protoreflect.Message

func (*SparkSqlBatch) Reset

func (x *SparkSqlBatch) Reset()

func (*SparkSqlBatch) String

func (x *SparkSqlBatch) String() string

SparkSqlJob

type SparkSqlJob struct {
	Queries isSparkSqlJob_Queries `protobuf_oneof:"queries"`

	ScriptVariables map[string]string "" /* 194 byte string literal not displayed */

	Properties map[string]string "" /* 161 byte string literal not displayed */

	JarFileUris []string `protobuf:"bytes,56,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`

	LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`

}

A Dataproc job for running Apache Spark SQL queries.

func (*SparkSqlJob) Descriptor

func (*SparkSqlJob) Descriptor() ([]byte, []int)

Deprecated: Use SparkSqlJob.ProtoReflect.Descriptor instead.

func (*SparkSqlJob) GetJarFileUris

func (x *SparkSqlJob) GetJarFileUris() []string

func (*SparkSqlJob) GetLoggingConfig

func (x *SparkSqlJob) GetLoggingConfig() *LoggingConfig

func (*SparkSqlJob) GetProperties

func (x *SparkSqlJob) GetProperties() map[string]string

func (*SparkSqlJob) GetQueries

func (m *SparkSqlJob) GetQueries() isSparkSqlJob_Queries

func (*SparkSqlJob) GetQueryFileUri

func (x *SparkSqlJob) GetQueryFileUri() string

func (*SparkSqlJob) GetQueryList

func (x *SparkSqlJob) GetQueryList() *QueryList

func (*SparkSqlJob) GetScriptVariables

func (x *SparkSqlJob) GetScriptVariables() map[string]string

func (*SparkSqlJob) ProtoMessage

func (*SparkSqlJob) ProtoMessage()

func (*SparkSqlJob) ProtoReflect

func (x *SparkSqlJob) ProtoReflect() protoreflect.Message

func (*SparkSqlJob) Reset

func (x *SparkSqlJob) Reset()

func (*SparkSqlJob) String

func (x *SparkSqlJob) String() string

SparkSqlJob_QueryFileUri

type SparkSqlJob_QueryFileUri struct {
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

SparkSqlJob_QueryList

type SparkSqlJob_QueryList struct {
	// A list of queries.
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

StartClusterRequest

type StartClusterRequest struct {

	// Required. The ID of the Google Cloud Platform project the
	// cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,2,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Optional. Specifying the `cluster_uuid` means the RPC will fail
	// (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
	ClusterUuid string `protobuf:"bytes,4,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Optional. A unique ID used to identify the request. If the server
	// receives two
	// [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s
	// with the same id, then the second request will be ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created
	// and stored in the backend is returned.
	//
	// Recommendation: Set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The ID must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// contains filtered or unexported fields
}

A request to start a cluster.
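
A sketch assuming the dataprocpb import alias and the github.com/google/uuid module for the recommended UUID request ID (any unique string within the documented character and length limits works); project, region, and cluster are placeholders.

package example

import (
	dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"github.com/google/uuid"
)

// startCluster builds a request with a UUID request ID so that a retried
// request is recognized as a duplicate by the server.
func startCluster() *dataprocpb.StartClusterRequest {
	return &dataprocpb.StartClusterRequest{
		ProjectId:   "my-project",
		Region:      "us-central1",
		ClusterName: "my-cluster",
		RequestId:   uuid.NewString(),
	}
}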

func (*StartClusterRequest) Descriptor

func (*StartClusterRequest) Descriptor() ([]byte, []int)

Deprecated: Use StartClusterRequest.ProtoReflect.Descriptor instead.

func (*StartClusterRequest) GetClusterName

func (x *StartClusterRequest) GetClusterName() string

func (*StartClusterRequest) GetClusterUuid

func (x *StartClusterRequest) GetClusterUuid() string

func (*StartClusterRequest) GetProjectId

func (x *StartClusterRequest) GetProjectId() string

func (*StartClusterRequest) GetRegion

func (x *StartClusterRequest) GetRegion() string

func (*StartClusterRequest) GetRequestId

func (x *StartClusterRequest) GetRequestId() string

func (*StartClusterRequest) ProtoMessage

func (*StartClusterRequest) ProtoMessage()

func (*StartClusterRequest) ProtoReflect

func (x *StartClusterRequest) ProtoReflect() protoreflect.Message

func (*StartClusterRequest) Reset

func (x *StartClusterRequest) Reset()

func (*StartClusterRequest) String

func (x *StartClusterRequest) String() string

StopClusterRequest

type StopClusterRequest struct {

	// Required. The ID of the Google Cloud Platform project the
	// cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,2,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Optional. Specifying the `cluster_uuid` means the RPC will fail
	// (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
	ClusterUuid string `protobuf:"bytes,4,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Optional. A unique ID used to identify the request. If the server
	// receives two
	// [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s
	// with the same id, then the second request will be ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created
	// and stored in the backend is returned.
	//
	// Recommendation: Set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The ID must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// contains filtered or unexported fields
}

A request to stop a cluster.

func (*StopClusterRequest) Descriptor

func (*StopClusterRequest) Descriptor() ([]byte, []int)

Deprecated: Use StopClusterRequest.ProtoReflect.Descriptor instead.

func (*StopClusterRequest) GetClusterName

func (x *StopClusterRequest) GetClusterName() string

func (*StopClusterRequest) GetClusterUuid

func (x *StopClusterRequest) GetClusterUuid() string

func (*StopClusterRequest) GetProjectId

func (x *StopClusterRequest) GetProjectId() string

func (*StopClusterRequest) GetRegion

func (x *StopClusterRequest) GetRegion() string

func (*StopClusterRequest) GetRequestId

func (x *StopClusterRequest) GetRequestId() string

func (*StopClusterRequest) ProtoMessage

func (*StopClusterRequest) ProtoMessage()

func (*StopClusterRequest) ProtoReflect

func (x *StopClusterRequest) ProtoReflect() protoreflect.Message

func (*StopClusterRequest) Reset

func (x *StopClusterRequest) Reset()

func (*StopClusterRequest) String

func (x *StopClusterRequest) String() string

SubmitJobRequest

type SubmitJobRequest struct {

	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job resource.
	Job *Job `protobuf:"bytes,2,opt,name=job,proto3" json:"job,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two
	// [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
	// with the same id, then the second request will be ignored and the
	// first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
	// is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// contains filtered or unexported fields
}

A request to submit a job.
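
A minimal end-to-end sketch, assuming the GAPIC JobControllerClient from cloud.google.com/go/dataproc/apiv1, Application Default Credentials, and the generated oneof wrapper names Job_SparkJob and SparkJob_MainClass; the regional endpoint, project, cluster, and URIs are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	dataproc "cloud.google.com/go/dataproc/apiv1"
	dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// The client must target the same region named in the request; the
	// endpoint below assumes us-central1.
	client, err := dataproc.NewJobControllerClient(ctx,
		option.WithEndpoint("us-central1-dataproc.googleapis.com:443"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	job, err := client.SubmitJob(ctx, &dataprocpb.SubmitJobRequest{
		ProjectId: "my-project",
		Region:    "us-central1",
		Job: &dataprocpb.Job{
			Placement: &dataprocpb.JobPlacement{ClusterName: "my-cluster"},
			TypeJob: &dataprocpb.Job_SparkJob{
				SparkJob: &dataprocpb.SparkJob{
					Driver: &dataprocpb.SparkJob_MainClass{MainClass: "org.example.Main"},
					Args:   []string{"--input", "gs://my-bucket/data"},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("submitted job:", job.GetReference().GetJobId())
}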

func (*SubmitJobRequest) Descriptor

func (*SubmitJobRequest) Descriptor() ([]byte, []int)

Deprecated: Use SubmitJobRequest.ProtoReflect.Descriptor instead.

func (*SubmitJobRequest) GetJob

func (x *SubmitJobRequest) GetJob() *Job

func (*SubmitJobRequest) GetProjectId

func (x *SubmitJobRequest) GetProjectId() string

func (*SubmitJobRequest) GetRegion

func (x *SubmitJobRequest) GetRegion() string

func (*SubmitJobRequest) GetRequestId

func (x *SubmitJobRequest) GetRequestId() string

func (*SubmitJobRequest) ProtoMessage

func (*SubmitJobRequest) ProtoMessage()

func (*SubmitJobRequest) ProtoReflect

func (x *SubmitJobRequest) ProtoReflect() protoreflect.Message

func (*SubmitJobRequest) Reset

func (x *SubmitJobRequest) Reset()

func (*SubmitJobRequest) String

func (x *SubmitJobRequest) String() string

TemplateParameter

type TemplateParameter struct {

	// Required. Parameter name.
	// The parameter name is used as the key, and paired with the
	// parameter value, which are passed to the template when the template
	// is instantiated.
	// The name must contain only capital letters (A-Z), numbers (0-9), and
	// underscores (_), and must not start with a number. The maximum length is
	// 40 characters.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Required. Paths to all fields that the parameter replaces.
	// A field is allowed to appear in at most one parameter's list of field
	// paths.
	//
	// A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
	// For example, a field path that references the zone field of a workflow
	// template's cluster selector would be specified as
	// `placement.clusterSelector.zone`.
	//
	// Also, field paths can reference fields using the following syntax:
	//
	// * Values in maps can be referenced by key:
	//   - labels['key']
	//   - placement.clusterSelector.clusterLabels['key']
	//   - placement.managedCluster.labels['key']
	//   - jobs['step-id'].labels['key']
	//
	// * Jobs in the jobs list can be referenced by step-id:
	//   - jobs['step-id'].hadoopJob.mainJarFileUri
	//   - jobs['step-id'].hiveJob.queryFileUri
	//   - jobs['step-id'].pySparkJob.mainPythonFileUri
	//   - jobs['step-id'].hadoopJob.jarFileUris[0]
	//   - jobs['step-id'].hadoopJob.archiveUris[0]
	//   - jobs['step-id'].hadoopJob.fileUris[0]
	//   - jobs['step-id'].pySparkJob.pythonFileUris[0]
	//
	// * Items in repeated fields can be referenced by a zero-based index:
	//   - jobs['step-id'].sparkJob.args[0]
	//
	// * Other examples:
	//   - jobs['step-id'].hadoopJob.properties['key']
	//   - jobs['step-id'].hadoopJob.args[0]
	//   - jobs['step-id'].hiveJob.scriptVariables['key']
	//   - jobs['step-id'].hadoopJob.mainJarFileUri
	//   - placement.clusterSelector.zone
	//
	// Maps and repeated fields cannot be parameterized in their entirety:
	// only individual map values and individual items in repeated fields
	// can be referenced. For example, the following field paths are
	// invalid:
	//
	// - placement.clusterSelector.clusterLabels
	// - jobs['step-id'].sparkJob.args
	Fields []string `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"`
	// Optional. Brief description of the parameter.
	// Must not exceed 1024 characters.
	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
	// Optional. Validation rules to be applied to this parameter's value.
	Validation *ParameterValidation `protobuf:"bytes,4,opt,name=validation,proto3" json:"validation,omitempty"`
	// contains filtered or unexported fields
}

A configurable parameter that replaces one or more fields in the template. Parameterizable fields:

  • Labels
  • File uris
  • Job properties
  • Job arguments
  • Script variables
  • Main class (in HadoopJob and SparkJob)
  • Zone (in ClusterSelector)
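
A sketch of a parameter that substitutes the cluster-selector zone at instantiation time, assuming the dataprocpb import alias; the oneof wrapper name ParameterValidation_Values is assumed from the package's generated naming convention, and the zone values are placeholders.

package example

import dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"

// zoneParameter exposes placement.clusterSelector.zone as a template
// parameter restricted to two allowed values.
func zoneParameter() *dataprocpb.TemplateParameter {
	return &dataprocpb.TemplateParameter{
		Name:        "ZONE",
		Fields:      []string{"placement.clusterSelector.zone"},
		Description: "Zone for the selected cluster.",
		Validation: &dataprocpb.ParameterValidation{
			ValidationType: &dataprocpb.ParameterValidation_Values{
				Values: &dataprocpb.ValueValidation{
					Values: []string{"us-central1-a", "us-central1-b"},
				},
			},
		},
	}
}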

func (*TemplateParameter) Descriptor

func (*TemplateParameter) Descriptor() ([]byte, []int)

Deprecated: Use TemplateParameter.ProtoReflect.Descriptor instead.

func (*TemplateParameter) GetDescription

func (x *TemplateParameter) GetDescription() string

func (*TemplateParameter) GetFields

func (x *TemplateParameter) GetFields() []string

func (*TemplateParameter) GetName

func (x *TemplateParameter) GetName() string

func (*TemplateParameter) GetValidation

func (x *TemplateParameter) GetValidation() *ParameterValidation

func (*TemplateParameter) ProtoMessage

func (*TemplateParameter) ProtoMessage()

func (*TemplateParameter) ProtoReflect

func (x *TemplateParameter) ProtoReflect() protoreflect.Message

func (*TemplateParameter) Reset

func (x *TemplateParameter) Reset()

func (*TemplateParameter) String

func (x *TemplateParameter) String() string

UnimplementedAutoscalingPolicyServiceServer

type UnimplementedAutoscalingPolicyServiceServer struct {
}

UnimplementedAutoscalingPolicyServiceServer can be embedded to have forward compatible implementations.
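
A sketch of the embedding pattern, assuming the generated server interface signatures (context plus request, returning response and error) and the grpc status/codes helpers; the behavior of the overridden method is illustrative only.

package example

import (
	"context"

	dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// policyServer overrides only GetAutoscalingPolicy; every other RPC falls
// through to the embedded type's "unimplemented" responses, so the server
// keeps compiling if new methods are added to the service.
type policyServer struct {
	dataprocpb.UnimplementedAutoscalingPolicyServiceServer
}

func (s *policyServer) GetAutoscalingPolicy(ctx context.Context, req *dataprocpb.GetAutoscalingPolicyRequest) (*dataprocpb.AutoscalingPolicy, error) {
	if req.GetName() == "" {
		return nil, status.Error(codes.InvalidArgument, "name is required")
	}
	return &dataprocpb.AutoscalingPolicy{Name: req.GetName()}, nil
}

The value is registered with the package's generated registration function for the service as usual; any method that is not overridden responds with codes.Unimplemented.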

func (*UnimplementedAutoscalingPolicyServiceServer) CreateAutoscalingPolicy

func (*UnimplementedAutoscalingPolicyServiceServer) DeleteAutoscalingPolicy

func (*UnimplementedAutoscalingPolicyServiceServer) GetAutoscalingPolicy

func (*UnimplementedAutoscalingPolicyServiceServer) ListAutoscalingPolicies

func (*UnimplementedAutoscalingPolicyServiceServer) UpdateAutoscalingPolicy

UnimplementedBatchControllerServer

type UnimplementedBatchControllerServer struct {
}

UnimplementedBatchControllerServer can be embedded to have forward compatible implementations.

func (*UnimplementedBatchControllerServer) CreateBatch

func (*UnimplementedBatchControllerServer) DeleteBatch

func (*UnimplementedBatchControllerServer) GetBatch

func (*UnimplementedBatchControllerServer) ListBatches

UnimplementedClusterControllerServer

type UnimplementedClusterControllerServer struct {
}

UnimplementedClusterControllerServer can be embedded to have forward compatible implementations.

func (*UnimplementedClusterControllerServer) CreateCluster

func (*UnimplementedClusterControllerServer) DeleteCluster

func (*UnimplementedClusterControllerServer) DiagnoseCluster

func (*UnimplementedClusterControllerServer) GetCluster

func (*UnimplementedClusterControllerServer) ListClusters

func (*UnimplementedClusterControllerServer) StartCluster

func (*UnimplementedClusterControllerServer) StopCluster

func (*UnimplementedClusterControllerServer) UpdateCluster

UnimplementedJobControllerServer

type UnimplementedJobControllerServer struct {
}

UnimplementedJobControllerServer can be embedded to have forward compatible implementations.

func (*UnimplementedJobControllerServer) CancelJob

func (*UnimplementedJobControllerServer) DeleteJob

func (*UnimplementedJobControllerServer) GetJob

func (*UnimplementedJobControllerServer) ListJobs

func (*UnimplementedJobControllerServer) SubmitJob

func (*UnimplementedJobControllerServer) SubmitJobAsOperation

func (*UnimplementedJobControllerServer) UpdateJob

UnimplementedNodeGroupControllerServer

type UnimplementedNodeGroupControllerServer struct {
}

UnimplementedNodeGroupControllerServer can be embedded to have forward compatible implementations.

func (*UnimplementedNodeGroupControllerServer) CreateNodeGroup

func (*UnimplementedNodeGroupControllerServer) GetNodeGroup

func (*UnimplementedNodeGroupControllerServer) ResizeNodeGroup

UnimplementedWorkflowTemplateServiceServer

type UnimplementedWorkflowTemplateServiceServer struct {
}

UnimplementedWorkflowTemplateServiceServer can be embedded to have forward compatible implementations.

func (*UnimplementedWorkflowTemplateServiceServer) CreateWorkflowTemplate

func (*UnimplementedWorkflowTemplateServiceServer) DeleteWorkflowTemplate

func (*UnimplementedWorkflowTemplateServiceServer) GetWorkflowTemplate

func (*UnimplementedWorkflowTemplateServiceServer) InstantiateInlineWorkflowTemplate

func (*UnimplementedWorkflowTemplateServiceServer) InstantiateWorkflowTemplate

func (*UnimplementedWorkflowTemplateServiceServer) ListWorkflowTemplates

func (*UnimplementedWorkflowTemplateServiceServer) UpdateWorkflowTemplate

UpdateAutoscalingPolicyRequest

type UpdateAutoscalingPolicyRequest struct {

	// Required. The updated autoscaling policy.
	Policy *AutoscalingPolicy `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
	// contains filtered or unexported fields
}

A request to update an autoscaling policy.

func (*UpdateAutoscalingPolicyRequest) Descriptor

func (*UpdateAutoscalingPolicyRequest) Descriptor() ([]byte, []int)

Deprecated: Use UpdateAutoscalingPolicyRequest.ProtoReflect.Descriptor instead.

func (*UpdateAutoscalingPolicyRequest) GetPolicy

func (*UpdateAutoscalingPolicyRequest) ProtoMessage

func (*UpdateAutoscalingPolicyRequest) ProtoMessage()

func (*UpdateAutoscalingPolicyRequest) ProtoReflect

func (*UpdateAutoscalingPolicyRequest) Reset

func (x *UpdateAutoscalingPolicyRequest) Reset()

func (*UpdateAutoscalingPolicyRequest) String

UpdateClusterRequest

type UpdateClusterRequest struct {
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`

	Region string `protobuf:"bytes,5,opt,name=region,proto3" json:"region,omitempty"`

	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`

	Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"`

	GracefulDecommissionTimeout *durationpb.Duration "" /* 144 byte string literal not displayed */

	UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`

	RequestId string `protobuf:"bytes,7,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`

}

A request to update a cluster.
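
A sketch of a field-masked update that resizes the primary worker group, assuming the dataprocpb import alias, the fieldmaskpb well-known type, and the Cluster/ClusterConfig/InstanceGroupConfig messages defined in this package; the mask path follows the documented pattern for updating the primary worker count, and names are placeholders.

package example

import (
	dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

// scaleWorkers updates only the primary worker count; fields outside the
// update mask are left untouched.
func scaleWorkers() *dataprocpb.UpdateClusterRequest {
	return &dataprocpb.UpdateClusterRequest{
		ProjectId:   "my-project",
		Region:      "us-central1",
		ClusterName: "my-cluster",
		Cluster: &dataprocpb.Cluster{
			Config: &dataprocpb.ClusterConfig{
				WorkerConfig: &dataprocpb.InstanceGroupConfig{NumInstances: 5},
			},
		},
		UpdateMask: &fieldmaskpb.FieldMask{
			Paths: []string{"config.worker_config.num_instances"},
		},
	}
}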

func (*UpdateClusterRequest) Descriptor

func (*UpdateClusterRequest) Descriptor() ([]byte, []int)

Deprecated: Use UpdateClusterRequest.ProtoReflect.Descriptor instead.

func (*UpdateClusterRequest) GetCluster

func (x *UpdateClusterRequest) GetCluster() *Cluster

func (*UpdateClusterRequest) GetClusterName

func (x *UpdateClusterRequest) GetClusterName() string

func (*UpdateClusterRequest) GetGracefulDecommissionTimeout

func (x *UpdateClusterRequest) GetGracefulDecommissionTimeout() *durationpb.Duration

func (*UpdateClusterRequest) GetProjectId

func (x *UpdateClusterRequest) GetProjectId() string

func (*UpdateClusterRequest) GetRegion

func (x *UpdateClusterRequest) GetRegion() string

func (*UpdateClusterRequest) GetRequestId

func (x *UpdateClusterRequest) GetRequestId() string

func (*UpdateClusterRequest) GetUpdateMask

func (x *UpdateClusterRequest) GetUpdateMask() *fieldmaskpb.FieldMask

func (*UpdateClusterRequest) ProtoMessage

func (*UpdateClusterRequest) ProtoMessage()

func (*UpdateClusterRequest) ProtoReflect

func (x *UpdateClusterRequest) ProtoReflect() protoreflect.Message

func (*UpdateClusterRequest) Reset

func (x *UpdateClusterRequest) Reset()

func (*UpdateClusterRequest) String

func (x *UpdateClusterRequest) String() string

UpdateJobRequest

type UpdateJobRequest struct {

	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,2,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// Required. The changes to the job.
	Job *Job `protobuf:"bytes,4,opt,name=job,proto3" json:"job,omitempty"`
	// Required. Specifies the path, relative to Job, of
	// the field to update. For example, to update the labels of a Job the
	// update_mask parameter would be specified as
	// labels, and the `PATCH` request body would specify the new
	// value. Note: Currently, labels is the only
	// field that can be updated.
	UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
	// contains filtered or unexported fields
}

A request to update a job.
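
A sketch assuming the dataprocpb import alias and the fieldmaskpb well-known type; it updates the job's labels, which the UpdateMask comment above notes is currently the only updatable field. IDs and label values are placeholders.

package example

import (
	dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

// relabelJob replaces the labels of an existing job.
func relabelJob() *dataprocpb.UpdateJobRequest {
	return &dataprocpb.UpdateJobRequest{
		ProjectId: "my-project",
		Region:    "us-central1",
		JobId:     "job-1234",
		Job: &dataprocpb.Job{
			Labels: map[string]string{"team": "analytics"},
		},
		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"labels"}},
	}
}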

func (*UpdateJobRequest) Descriptor

func (*UpdateJobRequest) Descriptor() ([]byte, []int)

Deprecated: Use UpdateJobRequest.ProtoReflect.Descriptor instead.

func (*UpdateJobRequest) GetJob

func (x *UpdateJobRequest) GetJob() *Job

func (*UpdateJobRequest) GetJobId

func (x *UpdateJobRequest) GetJobId() string

func (*UpdateJobRequest) GetProjectId

func (x *UpdateJobRequest) GetProjectId() string

func (*UpdateJobRequest) GetRegion

func (x *UpdateJobRequest) GetRegion() string

func (*UpdateJobRequest) GetUpdateMask

func (x *UpdateJobRequest) GetUpdateMask() *fieldmaskpb.FieldMask

func (*UpdateJobRequest) ProtoMessage

func (*UpdateJobRequest) ProtoMessage()

func (*UpdateJobRequest) ProtoReflect

func (x *UpdateJobRequest) ProtoReflect() protoreflect.Message

func (*UpdateJobRequest) Reset

func (x *UpdateJobRequest) Reset()

func (*UpdateJobRequest) String

func (x *UpdateJobRequest) String() string

UpdateWorkflowTemplateRequest

type UpdateWorkflowTemplateRequest struct {

	// Required. The updated workflow template.
	//
	// The `template.version` field must match the current version.
	Template *WorkflowTemplate `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"`
	// contains filtered or unexported fields
}

A request to update a workflow template.

func (*UpdateWorkflowTemplateRequest) Descriptor

func (*UpdateWorkflowTemplateRequest) Descriptor() ([]byte, []int)

Deprecated: Use UpdateWorkflowTemplateRequest.ProtoReflect.Descriptor instead.

func (*UpdateWorkflowTemplateRequest) GetTemplate

func (*UpdateWorkflowTemplateRequest) ProtoMessage

func (*UpdateWorkflowTemplateRequest) ProtoMessage()

func (*UpdateWorkflowTemplateRequest) ProtoReflect

func (*UpdateWorkflowTemplateRequest) Reset

func (x *UpdateWorkflowTemplateRequest) Reset()

func (*UpdateWorkflowTemplateRequest) String

ValueValidation

type ValueValidation struct {

	// Required. List of allowed values for the parameter.
	Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
	// contains filtered or unexported fields
}

Validation based on a list of allowed values.

func (*ValueValidation) Descriptor

func (*ValueValidation) Descriptor() ([]byte, []int)

Deprecated: Use ValueValidation.ProtoReflect.Descriptor instead.

func (*ValueValidation) GetValues

func (x *ValueValidation) GetValues() []string

func (*ValueValidation) ProtoMessage

func (*ValueValidation) ProtoMessage()

func (*ValueValidation) ProtoReflect

func (x *ValueValidation) ProtoReflect() protoreflect.Message

func (*ValueValidation) Reset

func (x *ValueValidation) Reset()

func (*ValueValidation) String

func (x *ValueValidation) String() string

VirtualClusterConfig

type VirtualClusterConfig struct {
	StagingBucket string `protobuf:"bytes,1,opt,name=staging_bucket,json=stagingBucket,proto3" json:"staging_bucket,omitempty"`

	InfrastructureConfig isVirtualClusterConfig_InfrastructureConfig `protobuf_oneof:"infrastructure_config"`

	AuxiliaryServicesConfig *AuxiliaryServicesConfig "" /* 132 byte string literal not displayed */

}

The Dataproc cluster config for a cluster that does not directly control the underlying compute resources, such as a Dataproc-on-GKE cluster.

func (*VirtualClusterConfig) Descriptor

func (*VirtualClusterConfig) Descriptor() ([]byte, []int)

Deprecated: Use VirtualClusterConfig.ProtoReflect.Descriptor instead.

func (*VirtualClusterConfig) GetAuxiliaryServicesConfig

func (x *VirtualClusterConfig) GetAuxiliaryServicesConfig() *AuxiliaryServicesConfig

func (*VirtualClusterConfig) GetInfrastructureConfig

func (m *VirtualClusterConfig) GetInfrastructureConfig() isVirtualClusterConfig_InfrastructureConfig

func (*VirtualClusterConfig) GetKubernetesClusterConfig

func (x *VirtualClusterConfig) GetKubernetesClusterConfig() *KubernetesClusterConfig

func (*VirtualClusterConfig) GetStagingBucket

func (x *VirtualClusterConfig) GetStagingBucket() string

func (*VirtualClusterConfig) ProtoMessage

func (*VirtualClusterConfig) ProtoMessage()

func (*VirtualClusterConfig) ProtoReflect

func (x *VirtualClusterConfig) ProtoReflect() protoreflect.Message

func (*VirtualClusterConfig) Reset

func (x *VirtualClusterConfig) Reset()

func (*VirtualClusterConfig) String

func (x *VirtualClusterConfig) String() string

VirtualClusterConfig_KubernetesClusterConfig

type VirtualClusterConfig_KubernetesClusterConfig struct {
	// Required. The configuration for running the Dataproc cluster on
	// Kubernetes.
	KubernetesClusterConfig *KubernetesClusterConfig `protobuf:"bytes,6,opt,name=kubernetes_cluster_config,json=kubernetesClusterConfig,proto3,oneof"`
}

WorkflowGraph

type WorkflowGraph struct {

	// Output only. The workflow nodes.
	Nodes []*WorkflowNode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
	// contains filtered or unexported fields
}

The workflow graph.

func (*WorkflowGraph) Descriptor

func (*WorkflowGraph) Descriptor() ([]byte, []int)

Deprecated: Use WorkflowGraph.ProtoReflect.Descriptor instead.

func (*WorkflowGraph) GetNodes

func (x *WorkflowGraph) GetNodes() []*WorkflowNode

func (*WorkflowGraph) ProtoMessage

func (*WorkflowGraph) ProtoMessage()

func (*WorkflowGraph) ProtoReflect

func (x *WorkflowGraph) ProtoReflect() protoreflect.Message

func (*WorkflowGraph) Reset

func (x *WorkflowGraph) Reset()

func (*WorkflowGraph) String

func (x *WorkflowGraph) String() string

WorkflowMetadata

type WorkflowMetadata struct {
	Template string `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"`

	Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`

	CreateCluster *ClusterOperation `protobuf:"bytes,3,opt,name=create_cluster,json=createCluster,proto3" json:"create_cluster,omitempty"`

	Graph *WorkflowGraph `protobuf:"bytes,4,opt,name=graph,proto3" json:"graph,omitempty"`

	DeleteCluster *ClusterOperation `protobuf:"bytes,5,opt,name=delete_cluster,json=deleteCluster,proto3" json:"delete_cluster,omitempty"`

	State WorkflowMetadata_State `protobuf:"varint,6,opt,name=state,proto3,enum=google.cloud.dataproc.v1.WorkflowMetadata_State" json:"state,omitempty"`

	ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`

	Parameters map[string]string "" /* 161 byte string literal not displayed */

	StartTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`

	EndTime *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`

	ClusterUuid string `protobuf:"bytes,11,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`

	DagTimeout *durationpb.Duration `protobuf:"bytes,12,opt,name=dag_timeout,json=dagTimeout,proto3" json:"dag_timeout,omitempty"`

	DagStartTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=dag_start_time,json=dagStartTime,proto3" json:"dag_start_time,omitempty"`

	DagEndTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=dag_end_time,json=dagEndTime,proto3" json:"dag_end_time,omitempty"`

}

Metadata describing a Dataproc workflow: the operation state, cluster operations, and job graph of an instantiated workflow template.

func (*WorkflowMetadata) Descriptor

func (*WorkflowMetadata) Descriptor() ([]byte, []int)

Deprecated: Use WorkflowMetadata.ProtoReflect.Descriptor instead.

func (*WorkflowMetadata) GetClusterName

func (x *WorkflowMetadata) GetClusterName() string

func (*WorkflowMetadata) GetClusterUuid

func (x *WorkflowMetadata) GetClusterUuid() string

func (*WorkflowMetadata) GetCreateCluster

func (x *WorkflowMetadata) GetCreateCluster() *ClusterOperation

func (*WorkflowMetadata) GetDagEndTime

func (x *WorkflowMetadata) GetDagEndTime() *timestamppb.Timestamp

func (*WorkflowMetadata) GetDagStartTime

func (x *WorkflowMetadata) GetDagStartTime() *timestamppb.Timestamp

func (*WorkflowMetadata) GetDagTimeout

func (x *WorkflowMetadata) GetDagTimeout() *durationpb.Duration

func (*WorkflowMetadata) GetDeleteCluster

func (x *WorkflowMetadata) GetDeleteCluster() *ClusterOperation

func (*WorkflowMetadata) GetEndTime

func (x *WorkflowMetadata) GetEndTime() *timestamppb.Timestamp

func (*WorkflowMetadata) GetGraph

func (x *WorkflowMetadata) GetGraph() *WorkflowGraph

func (*WorkflowMetadata) GetParameters

func (x *WorkflowMetadata) GetParameters() map[string]string

func (*WorkflowMetadata) GetStartTime

func (x *WorkflowMetadata) GetStartTime() *timestamppb.Timestamp

func (*WorkflowMetadata) GetState

func (*WorkflowMetadata) GetTemplate

func (x *WorkflowMetadata) GetTemplate() string

func (*WorkflowMetadata) GetVersion

func (x *WorkflowMetadata) GetVersion() int32

func (*WorkflowMetadata) ProtoMessage

func (*WorkflowMetadata) ProtoMessage()

func (*WorkflowMetadata) ProtoReflect

func (x *WorkflowMetadata) ProtoReflect() protoreflect.Message

func (*WorkflowMetadata) Reset

func (x *WorkflowMetadata) Reset()

func (*WorkflowMetadata) String

func (x *WorkflowMetadata) String() string

WorkflowMetadata_State

type WorkflowMetadata_State int32

The operation state.

WorkflowMetadata_UNKNOWN, WorkflowMetadata_PENDING, WorkflowMetadata_RUNNING, WorkflowMetadata_DONE

const (
	// Unused.
	WorkflowMetadata_UNKNOWN WorkflowMetadata_State = 0
	// The operation has been created.
	WorkflowMetadata_PENDING WorkflowMetadata_State = 1
	// The operation is running.
	WorkflowMetadata_RUNNING WorkflowMetadata_State = 2
	// The operation is done; either cancelled or completed.
	WorkflowMetadata_DONE WorkflowMetadata_State = 3
)

func (WorkflowMetadata_State) Descriptor

func (WorkflowMetadata_State) Enum

func (WorkflowMetadata_State) EnumDescriptor

func (WorkflowMetadata_State) EnumDescriptor() ([]byte, []int)

Deprecated: Use WorkflowMetadata_State.Descriptor instead.

func (WorkflowMetadata_State) Number

func (WorkflowMetadata_State) String

func (x WorkflowMetadata_State) String() string

func (WorkflowMetadata_State) Type

WorkflowNode

type WorkflowNode struct {

	// Output only. The name of the node.
	StepId string `protobuf:"bytes,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"`
	// Output only. Node's prerequisite nodes.
	PrerequisiteStepIds []string `protobuf:"bytes,2,rep,name=prerequisite_step_ids,json=prerequisiteStepIds,proto3" json:"prerequisite_step_ids,omitempty"`
	// Output only. The job id; populated after the node enters RUNNING state.
	JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// Output only. The node state.
	State WorkflowNode_NodeState `protobuf:"varint,5,opt,name=state,proto3,enum=google.cloud.dataproc.v1.WorkflowNode_NodeState" json:"state,omitempty"`
	// Output only. The error detail.
	Error string `protobuf:"bytes,6,opt,name=error,proto3" json:"error,omitempty"`
	// contains filtered or unexported fields
}

The workflow node.

func (*WorkflowNode) Descriptor

func (*WorkflowNode) Descriptor() ([]byte, []int)

Deprecated: Use WorkflowNode.ProtoReflect.Descriptor instead.

func (*WorkflowNode) GetError

func (x *WorkflowNode) GetError() string

func (*WorkflowNode) GetJobId

func (x *WorkflowNode) GetJobId() string

func (*WorkflowNode) GetPrerequisiteStepIds

func (x *WorkflowNode) GetPrerequisiteStepIds() []string

func (*WorkflowNode) GetState

func (x *WorkflowNode) GetState() WorkflowNode_NodeState

func (*WorkflowNode) GetStepId

func (x *WorkflowNode) GetStepId() string

func (*WorkflowNode) ProtoMessage

func (*WorkflowNode) ProtoMessage()

func (*WorkflowNode) ProtoReflect

func (x *WorkflowNode) ProtoReflect() protoreflect.Message

func (*WorkflowNode) Reset

func (x *WorkflowNode) Reset()

func (*WorkflowNode) String

func (x *WorkflowNode) String() string

WorkflowNode_NodeState

type WorkflowNode_NodeState int32

The workflow node state.

WorkflowNode_NODE_STATE_UNSPECIFIED, WorkflowNode_BLOCKED, WorkflowNode_RUNNABLE, WorkflowNode_RUNNING, WorkflowNode_COMPLETED, WorkflowNode_FAILED

const (
	// State is unspecified.
	WorkflowNode_NODE_STATE_UNSPECIFIED WorkflowNode_NodeState = 0
	// The node is awaiting its prerequisite nodes to finish.
	WorkflowNode_BLOCKED WorkflowNode_NodeState = 1
	// The node is runnable but not running.
	WorkflowNode_RUNNABLE WorkflowNode_NodeState = 2
	// The node is running.
	WorkflowNode_RUNNING WorkflowNode_NodeState = 3
	// The node completed successfully.
	WorkflowNode_COMPLETED WorkflowNode_NodeState = 4
	// The node failed. A node can be marked FAILED because
	// its ancestor or peer failed.
	WorkflowNode_FAILED WorkflowNode_NodeState = 5
)

func (WorkflowNode_NodeState) Descriptor

func (WorkflowNode_NodeState) Enum

func (WorkflowNode_NodeState) EnumDescriptor

func (WorkflowNode_NodeState) EnumDescriptor() ([]byte, []int)

Deprecated: Use WorkflowNode_NodeState.Descriptor instead.

func (WorkflowNode_NodeState) Number

func (WorkflowNode_NodeState) String

func (x WorkflowNode_NodeState) String() string

func (WorkflowNode_NodeState) Type

WorkflowTemplate

type WorkflowTemplate struct {
	Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`

	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`

	Version int32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"`

	CreateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`

	UpdateTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`

	Labels map[string]string "" /* 153 byte string literal not displayed */

	Placement *WorkflowTemplatePlacement `protobuf:"bytes,7,opt,name=placement,proto3" json:"placement,omitempty"`

	Jobs []*OrderedJob `protobuf:"bytes,8,rep,name=jobs,proto3" json:"jobs,omitempty"`

	Parameters []*TemplateParameter `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"`

	DagTimeout *durationpb.Duration `protobuf:"bytes,10,opt,name=dag_timeout,json=dagTimeout,proto3" json:"dag_timeout,omitempty"`

}

A Dataproc workflow template resource.
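
A sketch of a template that targets an existing cluster chosen by label and runs one Spark step, assuming the dataprocpb import alias and the durationpb well-known type; the oneof wrapper names WorkflowTemplatePlacement_ClusterSelector and OrderedJob_SparkJob are assumed from the package's generated naming convention, and all IDs, labels, and class names are placeholders.

package example

import (
	"time"

	dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

// nightlyTemplate defines a single-step workflow with a two-hour DAG
// timeout, placed on whichever cluster carries the env=prod label.
func nightlyTemplate() *dataprocpb.WorkflowTemplate {
	return &dataprocpb.WorkflowTemplate{
		Id: "nightly-etl",
		Placement: &dataprocpb.WorkflowTemplatePlacement{
			Placement: &dataprocpb.WorkflowTemplatePlacement_ClusterSelector{
				ClusterSelector: &dataprocpb.ClusterSelector{
					ClusterLabels: map[string]string{"env": "prod"},
				},
			},
		},
		Jobs: []*dataprocpb.OrderedJob{{
			StepId: "spark-etl",
			JobType: &dataprocpb.OrderedJob_SparkJob{
				SparkJob: &dataprocpb.SparkJob{
					Driver: &dataprocpb.SparkJob_MainClass{MainClass: "org.example.Etl"},
				},
			},
		}},
		DagTimeout: durationpb.New(2 * time.Hour),
	}
}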

func (*WorkflowTemplate) Descriptor

func (*WorkflowTemplate) Descriptor() ([]byte, []int)

Deprecated: Use WorkflowTemplate.ProtoReflect.Descriptor instead.

func (*WorkflowTemplate) GetCreateTime

func (x *WorkflowTemplate) GetCreateTime() *timestamppb.Timestamp

func (*WorkflowTemplate) GetDagTimeout

func (x *WorkflowTemplate) GetDagTimeout() *durationpb.Duration

func (*WorkflowTemplate) GetId

func (x *WorkflowTemplate) GetId() string

func (*WorkflowTemplate) GetJobs

func (x *WorkflowTemplate) GetJobs() []*OrderedJob

func (*WorkflowTemplate) GetLabels

func (x *WorkflowTemplate) GetLabels() map[string]string

func (*WorkflowTemplate) GetName

func (x *WorkflowTemplate) GetName() string

func (*WorkflowTemplate) GetParameters

func (x *WorkflowTemplate) GetParameters() []*TemplateParameter

func (*WorkflowTemplate) GetPlacement

func (x *WorkflowTemplate) GetPlacement() *WorkflowTemplatePlacement

func (*WorkflowTemplate) GetUpdateTime

func (x *WorkflowTemplate) GetUpdateTime() *timestamppb.Timestamp

func (*WorkflowTemplate) GetVersion

func (x *WorkflowTemplate) GetVersion() int32

func (*WorkflowTemplate) ProtoMessage

func (*WorkflowTemplate) ProtoMessage()

func (*WorkflowTemplate) ProtoReflect

func (x *WorkflowTemplate) ProtoReflect() protoreflect.Message

func (*WorkflowTemplate) Reset

func (x *WorkflowTemplate) Reset()

func (*WorkflowTemplate) String

func (x *WorkflowTemplate) String() string

WorkflowTemplatePlacement

type WorkflowTemplatePlacement struct {

	// Required. Specifies where workflow executes; either on a managed
	// cluster or an existing cluster chosen by labels.
	//
	// Types that are assignable to Placement:
	//
	//	*WorkflowTemplatePlacement_ManagedCluster
	//	*WorkflowTemplatePlacement_ClusterSelector
	Placement isWorkflowTemplatePlacement_Placement `protobuf_oneof:"placement"`
	// contains filtered or unexported fields
}

Specifies workflow execution target.

Either managed_cluster or cluster_selector is required.

func (*WorkflowTemplatePlacement) Descriptor

func (*WorkflowTemplatePlacement) Descriptor() ([]byte, []int)

Deprecated: Use WorkflowTemplatePlacement.ProtoReflect.Descriptor instead.

func (*WorkflowTemplatePlacement) GetClusterSelector

func (x *WorkflowTemplatePlacement) GetClusterSelector() *ClusterSelector

func (*WorkflowTemplatePlacement) GetManagedCluster

func (x *WorkflowTemplatePlacement) GetManagedCluster() *ManagedCluster

func (*WorkflowTemplatePlacement) GetPlacement

func (m *WorkflowTemplatePlacement) GetPlacement() isWorkflowTemplatePlacement_Placement

func (*WorkflowTemplatePlacement) ProtoMessage

func (*WorkflowTemplatePlacement) ProtoMessage()

func (*WorkflowTemplatePlacement) ProtoReflect

func (*WorkflowTemplatePlacement) Reset

func (x *WorkflowTemplatePlacement) Reset()

func (*WorkflowTemplatePlacement) String

func (x *WorkflowTemplatePlacement) String() string

WorkflowTemplatePlacement_ClusterSelector

type WorkflowTemplatePlacement_ClusterSelector struct {
	// Optional. A selector that chooses target cluster for jobs based
	// on metadata.
	//
	// The selector is evaluated at the time each job is submitted.
	ClusterSelector *ClusterSelector `protobuf:"bytes,2,opt,name=cluster_selector,json=clusterSelector,proto3,oneof"`
}

WorkflowTemplatePlacement_ManagedCluster

type WorkflowTemplatePlacement_ManagedCluster struct {
	// A cluster that is managed by the workflow.
	ManagedCluster *ManagedCluster `protobuf:"bytes,1,opt,name=managed_cluster,json=managedCluster,proto3,oneof"`
}

WorkflowTemplateServiceClient

type WorkflowTemplateServiceClient interface {
	// Creates a new workflow template.
	CreateWorkflowTemplate(ctx context.Context, in *CreateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
	// Retrieves the latest workflow template.
	//
	// A previously instantiated template can be retrieved by specifying the
	// optional version parameter.
	GetWorkflowTemplate(ctx context.Context, in *GetWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
	// Instantiates a template and begins execution.
	//
	// The returned Operation can be used to track execution of the
	// workflow by polling
	// [operations.get][google.longrunning.Operations.GetOperation].
	// The Operation will complete when the entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [operations.cancel][google.longrunning.Operations.CancelOperation].
	// This will cause any inflight jobs to be cancelled and workflow-owned
	// clusters to be deleted.
	//
	// The [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
	// Also see [Using
	// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
	//
	// On successful completion,
	// [Operation.response][google.longrunning.Operation.response] will be
	// [Empty][google.protobuf.Empty].
	InstantiateWorkflowTemplate(ctx context.Context, in *InstantiateWorkflowTemplateRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Instantiates a template and begins execution.
	//
	// This method is equivalent to executing the sequence
	// [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
	// [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
	//
	// The returned Operation can be used to track execution of the
	// workflow by polling
	// [operations.get][google.longrunning.Operations.GetOperation].
	// The Operation will complete when the entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [operations.cancel][google.longrunning.Operations.CancelOperation].
	// This will cause any inflight jobs to be cancelled and workflow-owned
	// clusters to be deleted.
	//
	// The [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
	// Also see [Using
	// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
	//
	// On successful completion,
	// [Operation.response][google.longrunning.Operation.response] will be
	// [Empty][google.protobuf.Empty].
	InstantiateInlineWorkflowTemplate(ctx context.Context, in *InstantiateInlineWorkflowTemplateRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Updates (replaces) a workflow template. The updated template
	// must contain a version that matches the current server version.
	UpdateWorkflowTemplate(ctx context.Context, in *UpdateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
	// Lists workflows that match the specified filter in the request.
	ListWorkflowTemplates(ctx context.Context, in *ListWorkflowTemplatesRequest, opts ...grpc.CallOption) (*ListWorkflowTemplatesResponse, error)
	// Deletes a workflow template. It does not cancel in-progress workflows.
	DeleteWorkflowTemplate(ctx context.Context, in *DeleteWorkflowTemplateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}

WorkflowTemplateServiceClient is the client API for the WorkflowTemplateService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewWorkflowTemplateServiceClient

func NewWorkflowTemplateServiceClient(cc grpc.ClientConnInterface) WorkflowTemplateServiceClient
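
A minimal sketch of wiring the stub to a connection and instantiating a template. It assumes a locally reachable, unauthenticated test endpoint; against the real service you would dial the regional endpoint (for example us-central1-dataproc.googleapis.com:443) with TLS and OAuth credentials, or use the higher-level cloud.google.com/go/dataproc/apiv1 client, which handles that for you. The address, project, region, and template name are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx := context.Background()

	// Unauthenticated connection to a local test double; not suitable for the real API.
	conn, err := grpc.Dial("localhost:8080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := dataprocpb.NewWorkflowTemplateServiceClient(conn)

	// Instantiate an existing template; the call returns a long-running Operation.
	op, err := client.InstantiateWorkflowTemplate(ctx, &dataprocpb.InstantiateWorkflowTemplateRequest{
		Name: "projects/my-project/regions/us-central1/workflowTemplates/my-template",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Track the workflow by polling operations.get with this name, as described in the method comment.
	fmt.Println("operation:", op.GetName())
}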

WorkflowTemplateServiceServer

type WorkflowTemplateServiceServer interface {
	// Creates a new workflow template.
	CreateWorkflowTemplate(context.Context, *CreateWorkflowTemplateRequest) (*WorkflowTemplate, error)
	// Retrieves the latest workflow template.
	//
	// Can retrieve a previously instantiated template by specifying the
	// optional version parameter.
	GetWorkflowTemplate(context.Context, *GetWorkflowTemplateRequest) (*WorkflowTemplate, error)
	// Instantiates a template and begins execution.
	//
	// The returned Operation can be used to track execution of the
	// workflow by polling
	// [operations.get][google.longrunning.Operations.GetOperation].
	// The Operation will complete when the entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [operations.cancel][google.longrunning.Operations.CancelOperation].
	// This will cause any inflight jobs to be cancelled and workflow-owned
	// clusters to be deleted.
	//
	// The [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
	// Also see [Using
	// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
	//
	// On successful completion,
	// [Operation.response][google.longrunning.Operation.response] will be
	// [Empty][google.protobuf.Empty].
	InstantiateWorkflowTemplate(context.Context, *InstantiateWorkflowTemplateRequest) (*longrunning.Operation, error)
	// Instantiates a template and begins execution.
	//
	// This method is equivalent to executing the sequence
	// [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
	// [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
	//
	// The returned Operation can be used to track execution of the
	// workflow by polling
	// [operations.get][google.longrunning.Operations.GetOperation].
	// The Operation will complete when the entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [operations.cancel][google.longrunning.Operations.CancelOperation].
	// This will cause any inflight jobs to be cancelled and workflow-owned
	// clusters to be deleted.
	//
	// The [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
	// Also see [Using
	// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
	//
	// On successful completion,
	// [Operation.response][google.longrunning.Operation.response] will be
	// [Empty][google.protobuf.Empty].
	InstantiateInlineWorkflowTemplate(context.Context, *InstantiateInlineWorkflowTemplateRequest) (*longrunning.Operation, error)
	// Updates (replaces) a workflow template. The updated template
	// must contain a version that matches the current server version.
	UpdateWorkflowTemplate(context.Context, *UpdateWorkflowTemplateRequest) (*WorkflowTemplate, error)
	// Lists workflows that match the specified filter in the request.
	ListWorkflowTemplates(context.Context, *ListWorkflowTemplatesRequest) (*ListWorkflowTemplatesResponse, error)
	// Deletes a workflow template. It does not cancel in-progress workflows.
	DeleteWorkflowTemplate(context.Context, *DeleteWorkflowTemplateRequest) (*emptypb.Empty, error)
}

WorkflowTemplateServiceServer is the server API for the WorkflowTemplateService service.
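
A minimal sketch of a test double for this interface. It embeds the package's generated UnimplementedWorkflowTemplateServiceServer so that RPCs that are not overridden return codes.Unimplemented, and overrides a single method; the listen address and hard-coded response are illustrative:

package main

import (
	"context"
	"log"
	"net"

	dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"
	"google.golang.org/grpc"
)

// fakeTemplates serves canned WorkflowTemplateService responses for tests.
type fakeTemplates struct {
	dataprocpb.UnimplementedWorkflowTemplateServiceServer
}

// GetWorkflowTemplate echoes the requested name with a fixed version.
func (s *fakeTemplates) GetWorkflowTemplate(ctx context.Context, req *dataprocpb.GetWorkflowTemplateRequest) (*dataprocpb.WorkflowTemplate, error) {
	return &dataprocpb.WorkflowTemplate{Name: req.GetName(), Version: 1}, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:8080")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	dataprocpb.RegisterWorkflowTemplateServiceServer(srv, &fakeTemplates{})
	log.Fatal(srv.Serve(lis))
}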

YarnApplication

type YarnApplication struct {

	// Required. The application name.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Required. The application state.
	State YarnApplication_State `protobuf:"varint,2,opt,name=state,proto3,enum=google.cloud.dataproc.v1.YarnApplication_State" json:"state,omitempty"`
	// Required. The numerical progress of the application, from 1 to 100.
	Progress float32 `protobuf:"fixed32,3,opt,name=progress,proto3" json:"progress,omitempty"`
	// Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
	// TimelineServer that provides application-specific information. The URL uses
	// the internal hostname, and requires a proxy server for resolution and,
	// possibly, access.
	TrackingUrl string `protobuf:"bytes,4,opt,name=tracking_url,json=trackingUrl,proto3" json:"tracking_url,omitempty"`
	// contains filtered or unexported fields
}

A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.

Beta Feature: This report is available for testing purposes only. It may be changed before final release.
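
YARN applications are reported on a Dataproc job through Job.GetYarnApplications in this package. A minimal sketch that walks them, assuming job is a previously fetched *dataprocpb.Job and printYarnApps is an illustrative helper:

package example

import (
	"fmt"

	dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

// printYarnApps lists each YARN application reported on a job.
func printYarnApps(job *dataprocpb.Job) {
	for _, app := range job.GetYarnApplications() {
		fmt.Printf("%s: state=%s progress=%.0f%% url=%s\n",
			app.GetName(), app.GetState(), app.GetProgress(), app.GetTrackingUrl())
	}
}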

func (*YarnApplication) Descriptor

func (*YarnApplication) Descriptor() ([]byte, []int)

Deprecated: Use YarnApplication.ProtoReflect.Descriptor instead.

func (*YarnApplication) GetName

func (x *YarnApplication) GetName() string

func (*YarnApplication) GetProgress

func (x *YarnApplication) GetProgress() float32

func (*YarnApplication) GetState

func (x *YarnApplication) GetState() YarnApplication_State

func (*YarnApplication) GetTrackingUrl

func (x *YarnApplication) GetTrackingUrl() string

func (*YarnApplication) ProtoMessage

func (*YarnApplication) ProtoMessage()

func (*YarnApplication) ProtoReflect

func (x *YarnApplication) ProtoReflect() protoreflect.Message

func (*YarnApplication) Reset

func (x *YarnApplication) Reset()

func (*YarnApplication) String

func (x *YarnApplication) String() string

YarnApplication_State

type YarnApplication_State int32

The application state, corresponding to YarnProtos.YarnApplicationStateProto.

YarnApplication_STATE_UNSPECIFIED, YarnApplication_NEW, YarnApplication_NEW_SAVING, YarnApplication_SUBMITTED, YarnApplication_ACCEPTED, YarnApplication_RUNNING, YarnApplication_FINISHED, YarnApplication_FAILED, YarnApplication_KILLED

const (
	// Status is unspecified.
	YarnApplication_STATE_UNSPECIFIED YarnApplication_State = 0
	// Status is NEW.
	YarnApplication_NEW YarnApplication_State = 1
	// Status is NEW_SAVING.
	YarnApplication_NEW_SAVING YarnApplication_State = 2
	// Status is SUBMITTED.
	YarnApplication_SUBMITTED YarnApplication_State = 3
	// Status is ACCEPTED.
	YarnApplication_ACCEPTED YarnApplication_State = 4
	// Status is RUNNING.
	YarnApplication_RUNNING YarnApplication_State = 5
	// Status is FINISHED.
	YarnApplication_FINISHED YarnApplication_State = 6
	// Status is FAILED.
	YarnApplication_FAILED YarnApplication_State = 7
	// Status is KILLED.
	YarnApplication_KILLED YarnApplication_State = 8
)
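
The enum follows the same conventions as the other enums in this package: String returns the constant's name, and the package-level YarnApplication_State_name and YarnApplication_State_value maps convert between names and numbers. A minimal sketch:

package example

import (
	"fmt"

	dataprocpb "cloud.google.com/go/dataproc/apiv1/dataprocpb"
)

func enumExample() {
	state := dataprocpb.YarnApplication_RUNNING
	fmt.Println(state.String())                                      // "RUNNING"
	fmt.Println(dataprocpb.YarnApplication_State_name[int32(state)]) // "RUNNING"
	fmt.Println(dataprocpb.YarnApplication_State_value["KILLED"])    // 8
}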

func (YarnApplication_State) Descriptor

func (YarnApplication_State) Descriptor() protoreflect.EnumDescriptor

func (YarnApplication_State) Enum

func (x YarnApplication_State) Enum() *YarnApplication_State

func (YarnApplication_State) EnumDescriptor

func (YarnApplication_State) EnumDescriptor() ([]byte, []int)

Deprecated: Use YarnApplication_State.Descriptor instead.

func (YarnApplication_State) Number

func (x YarnApplication_State) Number() protoreflect.EnumNumber

func (YarnApplication_State) String

func (x YarnApplication_State) String() string

func (YarnApplication_State) Type