Reference documentation and code samples for the Cloud Dataproc V1 API class Google::Cloud::Dataproc::V1::Job.
A Dataproc job resource.
Inherits
- Object
Extended By
- Google::Protobuf::MessageExts::ClassMethods
Includes
- Google::Protobuf::MessageExts
Methods
#done
def done() -> ::Boolean
Returns
-
(::Boolean) — Output only. Indicates whether the job is completed. If the value is `false`, the job is still in progress. If `true`, the job is completed, and the `status.state` field will indicate if it was successful, failed, or cancelled.
#driver_control_files_uri
def driver_control_files_uri() -> ::String
Returns
-
(::String) — Output only. If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as `driver_output_uri`.
#driver_output_resource_uri
def driver_output_resource_uri() -> ::String
Returns
- (::String) — Output only. A URI pointing to the location of the stdout of the job's driver program.
#driver_scheduling_config
def driver_scheduling_config() -> ::Google::Cloud::Dataproc::V1::DriverSchedulingConfig
Returns
- (::Google::Cloud::Dataproc::V1::DriverSchedulingConfig) — Optional. Driver scheduling configuration.
#driver_scheduling_config=
def driver_scheduling_config=(value) -> ::Google::Cloud::Dataproc::V1::DriverSchedulingConfig
Parameter
- value (::Google::Cloud::Dataproc::V1::DriverSchedulingConfig) — Optional. Driver scheduling configuration.
Returns
- (::Google::Cloud::Dataproc::V1::DriverSchedulingConfig) — Optional. Driver scheduling configuration.
#hadoop_job
def hadoop_job() -> ::Google::Cloud::Dataproc::V1::HadoopJob
Returns
- (::Google::Cloud::Dataproc::V1::HadoopJob) — Optional. Job is a Hadoop job.
#hadoop_job=
def hadoop_job=(value) -> ::Google::Cloud::Dataproc::V1::HadoopJob
Parameter
- value (::Google::Cloud::Dataproc::V1::HadoopJob) — Optional. Job is a Hadoop job.
Returns
- (::Google::Cloud::Dataproc::V1::HadoopJob) — Optional. Job is a Hadoop job.
#hive_job
def hive_job() -> ::Google::Cloud::Dataproc::V1::HiveJob
Returns
- (::Google::Cloud::Dataproc::V1::HiveJob) — Optional. Job is a Hive job.
#hive_job=
def hive_job=(value) -> ::Google::Cloud::Dataproc::V1::HiveJob
Parameter
- value (::Google::Cloud::Dataproc::V1::HiveJob) — Optional. Job is a Hive job.
Returns
- (::Google::Cloud::Dataproc::V1::HiveJob) — Optional. Job is a Hive job.
#job_uuid
def job_uuid() -> ::String
Returns
- (::String) — Output only. A UUID that uniquely identifies a job within the project over time. This is in contrast to a user-settable `reference.job_id` that may be reused over time.
#labels
def labels() -> ::Google::Protobuf::Map{::String => ::String}
Returns
- (::Google::Protobuf::Map{::String => ::String}) — Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a job.
#labels=
def labels=(value) -> ::Google::Protobuf::Map{::String => ::String}
Parameter
- value (::Google::Protobuf::Map{::String => ::String}) — Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a job.
Returns
- (::Google::Protobuf::Map{::String => ::String}) — Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035. No more than 32 labels can be associated with a job.
#pig_job
def pig_job() -> ::Google::Cloud::Dataproc::V1::PigJob
Returns
- (::Google::Cloud::Dataproc::V1::PigJob) — Optional. Job is a Pig job.
#pig_job=
def pig_job=(value) -> ::Google::Cloud::Dataproc::V1::PigJob
Parameter
- value (::Google::Cloud::Dataproc::V1::PigJob) — Optional. Job is a Pig job.
Returns
- (::Google::Cloud::Dataproc::V1::PigJob) — Optional. Job is a Pig job.
#placement
def placement() -> ::Google::Cloud::Dataproc::V1::JobPlacement
Returns
- (::Google::Cloud::Dataproc::V1::JobPlacement) — Required. Job information, including how, when, and where to run the job.
#placement=
def placement=(value) -> ::Google::Cloud::Dataproc::V1::JobPlacement
Parameter
- value (::Google::Cloud::Dataproc::V1::JobPlacement) — Required. Job information, including how, when, and where to run the job.
Returns
- (::Google::Cloud::Dataproc::V1::JobPlacement) — Required. Job information, including how, when, and where to run the job.
#presto_job
def presto_job() -> ::Google::Cloud::Dataproc::V1::PrestoJob
Returns
- (::Google::Cloud::Dataproc::V1::PrestoJob) — Optional. Job is a Presto job.
#presto_job=
def presto_job=(value) -> ::Google::Cloud::Dataproc::V1::PrestoJob
Parameter
- value (::Google::Cloud::Dataproc::V1::PrestoJob) — Optional. Job is a Presto job.
Returns
- (::Google::Cloud::Dataproc::V1::PrestoJob) — Optional. Job is a Presto job.
#pyspark_job
def pyspark_job() -> ::Google::Cloud::Dataproc::V1::PySparkJob
Returns
- (::Google::Cloud::Dataproc::V1::PySparkJob) — Optional. Job is a PySpark job.
#pyspark_job=
def pyspark_job=(value) -> ::Google::Cloud::Dataproc::V1::PySparkJob
Parameter
- value (::Google::Cloud::Dataproc::V1::PySparkJob) — Optional. Job is a PySpark job.
Returns
- (::Google::Cloud::Dataproc::V1::PySparkJob) — Optional. Job is a PySpark job.
#reference
def reference() -> ::Google::Cloud::Dataproc::V1::JobReference
Returns
-
(::Google::Cloud::Dataproc::V1::JobReference) — Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a `job_id`.
#reference=
def reference=(value) -> ::Google::Cloud::Dataproc::V1::JobReference
Parameter
-
value (::Google::Cloud::Dataproc::V1::JobReference) — Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a `job_id`.
Returns
-
(::Google::Cloud::Dataproc::V1::JobReference) — Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a `job_id`.
#scheduling
def scheduling() -> ::Google::Cloud::Dataproc::V1::JobScheduling
Returns
- (::Google::Cloud::Dataproc::V1::JobScheduling) — Optional. Job scheduling configuration.
#scheduling=
def scheduling=(value) -> ::Google::Cloud::Dataproc::V1::JobScheduling
Parameter
- value (::Google::Cloud::Dataproc::V1::JobScheduling) — Optional. Job scheduling configuration.
Returns
- (::Google::Cloud::Dataproc::V1::JobScheduling) — Optional. Job scheduling configuration.
#spark_job
def spark_job() -> ::Google::Cloud::Dataproc::V1::SparkJob
Returns
- (::Google::Cloud::Dataproc::V1::SparkJob) — Optional. Job is a Spark job.
#spark_job=
def spark_job=(value) -> ::Google::Cloud::Dataproc::V1::SparkJob
Parameter
- value (::Google::Cloud::Dataproc::V1::SparkJob) — Optional. Job is a Spark job.
Returns
- (::Google::Cloud::Dataproc::V1::SparkJob) — Optional. Job is a Spark job.
#spark_r_job
def spark_r_job() -> ::Google::Cloud::Dataproc::V1::SparkRJob
Returns
- (::Google::Cloud::Dataproc::V1::SparkRJob) — Optional. Job is a SparkR job.
#spark_r_job=
def spark_r_job=(value) -> ::Google::Cloud::Dataproc::V1::SparkRJob
Parameter
- value (::Google::Cloud::Dataproc::V1::SparkRJob) — Optional. Job is a SparkR job.
Returns
- (::Google::Cloud::Dataproc::V1::SparkRJob) — Optional. Job is a SparkR job.
#spark_sql_job
def spark_sql_job() -> ::Google::Cloud::Dataproc::V1::SparkSqlJob
Returns
- (::Google::Cloud::Dataproc::V1::SparkSqlJob) — Optional. Job is a SparkSql job.
#spark_sql_job=
def spark_sql_job=(value) -> ::Google::Cloud::Dataproc::V1::SparkSqlJob
Parameter
- value (::Google::Cloud::Dataproc::V1::SparkSqlJob) — Optional. Job is a SparkSql job.
Returns
- (::Google::Cloud::Dataproc::V1::SparkSqlJob) — Optional. Job is a SparkSql job.
#status
def status() -> ::Google::Cloud::Dataproc::V1::JobStatus
Returns
-
(::Google::Cloud::Dataproc::V1::JobStatus) — Output only. The job status. Additional application-specific status information may be contained in the `type_job` and `yarn_applications` fields.
#status_history
def status_history() -> ::Array<::Google::Cloud::Dataproc::V1::JobStatus>
Returns
- (::Array<::Google::Cloud::Dataproc::V1::JobStatus>) — Output only. The previous job status.
#trino_job
def trino_job() -> ::Google::Cloud::Dataproc::V1::TrinoJob
Returns
- (::Google::Cloud::Dataproc::V1::TrinoJob) — Optional. Job is a Trino job.
#trino_job=
def trino_job=(value) -> ::Google::Cloud::Dataproc::V1::TrinoJob
Parameter
- value (::Google::Cloud::Dataproc::V1::TrinoJob) — Optional. Job is a Trino job.
Returns
- (::Google::Cloud::Dataproc::V1::TrinoJob) — Optional. Job is a Trino job.
#yarn_applications
def yarn_applications() -> ::Array<::Google::Cloud::Dataproc::V1::YarnApplication>
Returns
-
(::Array<::Google::Cloud::Dataproc::V1::YarnApplication>) — Output only. The collection of YARN applications spun up by this job.
Beta Feature: This report is available for testing purposes only. It may be changed before final release.