Reference documentation and code samples for the BigQuery Storage V1 API class Google::Cloud::Bigquery::Storage::V1::ReadSession.
Information about the ReadSession.
Inherits
- Object
Extended By
- Google::Protobuf::MessageExts::ClassMethods
Includes
- Google::Protobuf::MessageExts
Methods
#arrow_schema
def arrow_schema() -> ::Google::Cloud::Bigquery::Storage::V1::ArrowSchema
- (::Google::Cloud::Bigquery::Storage::V1::ArrowSchema) — Output only. Arrow schema.
#avro_schema
def avro_schema() -> ::Google::Cloud::Bigquery::Storage::V1::AvroSchema
- (::Google::Cloud::Bigquery::Storage::V1::AvroSchema) — Output only. Avro schema.
#data_format
def data_format() -> ::Google::Cloud::Bigquery::Storage::V1::DataFormat
- (::Google::Cloud::Bigquery::Storage::V1::DataFormat) — Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
#data_format=
def data_format=(value) -> ::Google::Cloud::Bigquery::Storage::V1::DataFormat
- value (::Google::Cloud::Bigquery::Storage::V1::DataFormat) — Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
- (::Google::Cloud::Bigquery::Storage::V1::DataFormat) — Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
#estimated_row_count
def estimated_row_count() -> ::Integer
- (::Integer) — Output only. An estimate on the number of rows present in this session's streams. This estimate is based on metadata from the table which might be incomplete or stale.
#estimated_total_bytes_scanned
def estimated_total_bytes_scanned() -> ::Integer
- (::Integer) — Output only. An estimate on the number of bytes this session will scan when all streams are completely consumed. This estimate is based on metadata from the table which might be incomplete or stale.
#estimated_total_physical_file_size
def estimated_total_physical_file_size() -> ::Integer
- (::Integer) — Output only. A pre-projected estimate of the total physical size (in bytes) of files this session will scan when all streams are completely consumed. This estimate does not depend on the selected columns and can be based on metadata from the table which might be incomplete or stale. Only set for BigLake tables.
#expire_time
def expire_time() -> ::Google::Protobuf::Timestamp
- (::Google::Protobuf::Timestamp) — Output only. Time at which the session becomes invalid. After this time, subsequent requests to read this Session will return errors. The expire_time is automatically assigned and currently cannot be specified or updated.
#name
def name() -> ::String
- (::String) — Output only. Unique identifier for the session, in the form `projects/{project_id}/locations/{location}/sessions/{session_id}`.
#read_options
def read_options() -> ::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions
- (::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions) — Optional. Read options for this session (e.g. column selection, filters).
#read_options=
def read_options=(value) -> ::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions
- value (::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions) — Optional. Read options for this session (e.g. column selection, filters).
- (::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions) — Optional. Read options for this session (e.g. column selection, filters).
#streams
def streams() -> ::Array<::Google::Cloud::Bigquery::Storage::V1::ReadStream>
- (::Array<::Google::Cloud::Bigquery::Storage::V1::ReadStream>) — Output only. A list of streams created with the session. At least one stream is created with the session. In the future, larger request_stream_count values may result in this list being unpopulated; in that case, the user will need to use a List method to get the streams instead, which is not yet available.
#table
def table() -> ::String
- (::String) — Immutable. Table that this ReadSession is reading from, in the form `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
#table=
def table=(value) -> ::String
- value (::String) — Immutable. Table that this ReadSession is reading from, in the form `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
- (::String) — Immutable. Table that this ReadSession is reading from, in the form `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
#table_modifiers
def table_modifiers() -> ::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableModifiers
- (::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableModifiers) — Optional. Any modifiers which are applied when reading from the specified table.
#table_modifiers=
def table_modifiers=(value) -> ::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableModifiers
- value (::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableModifiers) — Optional. Any modifiers which are applied when reading from the specified table.
- (::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableModifiers) — Optional. Any modifiers which are applied when reading from the specified table.
#trace_id
def trace_id() -> ::String
- (::String) — Optional. ID set by the client to annotate a session identity. This does not need to be strictly unique; instead, the same ID should be used to group logically connected sessions (e.g., using the same ID for all sessions needed to complete a Spark SQL query is reasonable). Maximum length is 256 bytes.
#trace_id=
def trace_id=(value) -> ::String
- value (::String) — Optional. ID set by the client to annotate a session identity. This does not need to be strictly unique; instead, the same ID should be used to group logically connected sessions (e.g., using the same ID for all sessions needed to complete a Spark SQL query is reasonable). Maximum length is 256 bytes.
- (::String) — Optional. ID set by the client to annotate a session identity. This does not need to be strictly unique; instead, the same ID should be used to group logically connected sessions (e.g., using the same ID for all sessions needed to complete a Spark SQL query is reasonable). Maximum length is 256 bytes.