- 1.64.0 (latest)
- 1.63.1
- 1.62.0
- 1.61.0
- 1.60.0
- 1.59.1
- 1.58.0
- 1.57.1
- 1.54.0
- 1.53.0
- 1.52.0
- 1.51.2
- 1.50.0
- 1.49.0
- 1.48.0
- 1.47.0
- 1.46.0
- 1.45.0
- 1.44.0
- 1.43.0
- 1.42.0
- 1.41.0
- 1.40.0
- 1.39.0
- 1.38.0
- 1.37.0
- 1.36.0
- 1.35.0
- 1.34.1
- 1.33.0
- 1.32.0
- 1.31.0
- 1.30.2
- 1.29.0
- 1.28.0
- 1.27.0
- 1.26.0
- 1.25.0
- 1.24.0
- 1.23.0
- 1.22.0
- 1.21.0
- 1.20.1
- 1.19.0
- 1.18.0
- 1.17.0
- 1.16.0
- 1.15.0
Variables
DataFormat_name, DataFormat_value
var (
DataFormat_name = map[int32]string{
0: "DATA_FORMAT_UNSPECIFIED",
1: "AVRO",
3: "ARROW",
}
DataFormat_value = map[string]int32{
"DATA_FORMAT_UNSPECIFIED": 0,
"AVRO": 1,
"ARROW": 3,
}
)
Enum value maps for DataFormat.
ShardingStrategy_name, ShardingStrategy_value
var (
ShardingStrategy_name = map[int32]string{
0: "SHARDING_STRATEGY_UNSPECIFIED",
1: "LIQUID",
2: "BALANCED",
}
ShardingStrategy_value = map[string]int32{
"SHARDING_STRATEGY_UNSPECIFIED": 0,
"LIQUID": 1,
"BALANCED": 2,
}
)
Enum value maps for ShardingStrategy.
File_google_cloud_bigquery_storage_v1beta1_arrow_proto
var File_google_cloud_bigquery_storage_v1beta1_arrow_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta1_avro_proto
var File_google_cloud_bigquery_storage_v1beta1_avro_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta1_read_options_proto
var File_google_cloud_bigquery_storage_v1beta1_read_options_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta1_storage_proto
var File_google_cloud_bigquery_storage_v1beta1_storage_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta1_table_reference_proto
var File_google_cloud_bigquery_storage_v1beta1_table_reference_proto protoreflect.FileDescriptor
Functions
func RegisterBigQueryStorageServer
func RegisterBigQueryStorageServer(s *grpc.Server, srv BigQueryStorageServer)
ArrowRecordBatch
type ArrowRecordBatch struct {
// IPC serialized Arrow RecordBatch.
SerializedRecordBatch []byte `protobuf:"bytes,1,opt,name=serialized_record_batch,json=serializedRecordBatch,proto3" json:"serialized_record_batch,omitempty"`
// The count of rows in the returning block.
RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
// contains filtered or unexported fields
}
Arrow RecordBatch.
func (*ArrowRecordBatch) Descriptor
func (*ArrowRecordBatch) Descriptor() ([]byte, []int)
Deprecated: Use ArrowRecordBatch.ProtoReflect.Descriptor instead.
func (*ArrowRecordBatch) GetRowCount
func (x *ArrowRecordBatch) GetRowCount() int64
func (*ArrowRecordBatch) GetSerializedRecordBatch
func (x *ArrowRecordBatch) GetSerializedRecordBatch() []byte
func (*ArrowRecordBatch) ProtoMessage
func (*ArrowRecordBatch) ProtoMessage()
func (*ArrowRecordBatch) ProtoReflect
func (x *ArrowRecordBatch) ProtoReflect() protoreflect.Message
func (*ArrowRecordBatch) Reset
func (x *ArrowRecordBatch) Reset()
func (*ArrowRecordBatch) String
func (x *ArrowRecordBatch) String() string
ArrowSchema
type ArrowSchema struct {
// IPC serialized Arrow schema.
SerializedSchema []byte `protobuf:"bytes,1,opt,name=serialized_schema,json=serializedSchema,proto3" json:"serialized_schema,omitempty"`
// contains filtered or unexported fields
}
Arrow schema.
func (*ArrowSchema) Descriptor
func (*ArrowSchema) Descriptor() ([]byte, []int)
Deprecated: Use ArrowSchema.ProtoReflect.Descriptor instead.
func (*ArrowSchema) GetSerializedSchema
func (x *ArrowSchema) GetSerializedSchema() []byte
func (*ArrowSchema) ProtoMessage
func (*ArrowSchema) ProtoMessage()
func (*ArrowSchema) ProtoReflect
func (x *ArrowSchema) ProtoReflect() protoreflect.Message
func (*ArrowSchema) Reset
func (x *ArrowSchema) Reset()
func (*ArrowSchema) String
func (x *ArrowSchema) String() string
AvroRows
type AvroRows struct {
// Binary serialized rows in a block.
SerializedBinaryRows []byte `protobuf:"bytes,1,opt,name=serialized_binary_rows,json=serializedBinaryRows,proto3" json:"serialized_binary_rows,omitempty"`
// The count of rows in the returning block.
RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
// contains filtered or unexported fields
}
Avro rows.
func (*AvroRows) Descriptor
Deprecated: Use AvroRows.ProtoReflect.Descriptor instead.
func (*AvroRows) GetRowCount
func (*AvroRows) GetSerializedBinaryRows
func (*AvroRows) ProtoMessage
func (*AvroRows) ProtoMessage()
func (*AvroRows) ProtoReflect
func (x *AvroRows) ProtoReflect() protoreflect.Message
func (*AvroRows) Reset
func (x *AvroRows) Reset()
func (*AvroRows) String
AvroSchema
type AvroSchema struct {
// Json serialized schema, as described at
// https://avro.apache.org/docs/1.8.1/spec.html
Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"`
// contains filtered or unexported fields
}
Avro schema.
func (*AvroSchema) Descriptor
func (*AvroSchema) Descriptor() ([]byte, []int)
Deprecated: Use AvroSchema.ProtoReflect.Descriptor instead.
func (*AvroSchema) GetSchema
func (x *AvroSchema) GetSchema() string
func (*AvroSchema) ProtoMessage
func (*AvroSchema) ProtoMessage()
func (*AvroSchema) ProtoReflect
func (x *AvroSchema) ProtoReflect() protoreflect.Message
func (*AvroSchema) Reset
func (x *AvroSchema) Reset()
func (*AvroSchema) String
func (x *AvroSchema) String() string
BatchCreateReadSessionStreamsRequest
type BatchCreateReadSessionStreamsRequest struct {
// Required. Must be a non-expired session obtained from a call to
// CreateReadSession. Only the name field needs to be set.
Session *ReadSession `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Required. Number of new streams requested. Must be positive.
// Number of added streams may be less than this, see CreateReadSessionRequest
// for more information.
RequestedStreams int32 `protobuf:"varint,2,opt,name=requested_streams,json=requestedStreams,proto3" json:"requested_streams,omitempty"`
// contains filtered or unexported fields
}
Information needed to request additional streams for an established read session.
func (*BatchCreateReadSessionStreamsRequest) Descriptor
func (*BatchCreateReadSessionStreamsRequest) Descriptor() ([]byte, []int)
Deprecated: Use BatchCreateReadSessionStreamsRequest.ProtoReflect.Descriptor instead.
func (*BatchCreateReadSessionStreamsRequest) GetRequestedStreams
func (x *BatchCreateReadSessionStreamsRequest) GetRequestedStreams() int32
func (*BatchCreateReadSessionStreamsRequest) GetSession
func (x *BatchCreateReadSessionStreamsRequest) GetSession() *ReadSession
func (*BatchCreateReadSessionStreamsRequest) ProtoMessage
func (*BatchCreateReadSessionStreamsRequest) ProtoMessage()
func (*BatchCreateReadSessionStreamsRequest) ProtoReflect
func (x *BatchCreateReadSessionStreamsRequest) ProtoReflect() protoreflect.Message
func (*BatchCreateReadSessionStreamsRequest) Reset
func (x *BatchCreateReadSessionStreamsRequest) Reset()
func (*BatchCreateReadSessionStreamsRequest) String
func (x *BatchCreateReadSessionStreamsRequest) String() string
BatchCreateReadSessionStreamsResponse
type BatchCreateReadSessionStreamsResponse struct {
// Newly added streams.
Streams []*Stream `protobuf:"bytes,1,rep,name=streams,proto3" json:"streams,omitempty"`
// contains filtered or unexported fields
}
The response from BatchCreateReadSessionStreams returns the stream identifiers for the newly created streams.
func (*BatchCreateReadSessionStreamsResponse) Descriptor
func (*BatchCreateReadSessionStreamsResponse) Descriptor() ([]byte, []int)
Deprecated: Use BatchCreateReadSessionStreamsResponse.ProtoReflect.Descriptor instead.
func (*BatchCreateReadSessionStreamsResponse) GetStreams
func (x *BatchCreateReadSessionStreamsResponse) GetStreams() []*Stream
func (*BatchCreateReadSessionStreamsResponse) ProtoMessage
func (*BatchCreateReadSessionStreamsResponse) ProtoMessage()
func (*BatchCreateReadSessionStreamsResponse) ProtoReflect
func (x *BatchCreateReadSessionStreamsResponse) ProtoReflect() protoreflect.Message
func (*BatchCreateReadSessionStreamsResponse) Reset
func (x *BatchCreateReadSessionStreamsResponse) Reset()
func (*BatchCreateReadSessionStreamsResponse) String
func (x *BatchCreateReadSessionStreamsResponse) String() string
BigQueryStorageClient
type BigQueryStorageClient interface {
// Creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Read sessions automatically expire 6 hours after they are created and do
// not require manual clean-up by the caller.
CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error)
// Reads rows from the table in the format prescribed by the read session.
// Each response contains one or more table rows, up to a maximum of 10 MiB
// per response; read requests which attempt to read individual rows larger
// than this will fail.
//
// Each request also returns a set of stream statistics reflecting the
// estimated total number of rows in the read stream. This number is computed
// based on the total table size and the number of active streams in the read
// session, and may change as other streams continue to read data.
ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryStorage_ReadRowsClient, error)
// Creates additional streams for a ReadSession. This API can be used to
// dynamically adjust the parallelism of a batch processing task upwards by
// adding additional workers.
BatchCreateReadSessionStreams(ctx context.Context, in *BatchCreateReadSessionStreamsRequest, opts ...grpc.CallOption) (*BatchCreateReadSessionStreamsResponse, error)
// Causes a single stream in a ReadSession to gracefully stop. This
// API can be used to dynamically adjust the parallelism of a batch processing
// task downwards without losing data.
//
// This API does not delete the stream -- it remains visible in the
// ReadSession, and any data processed by the stream is not released to other
// streams. However, no additional data will be assigned to the stream once
// this call completes. Callers must continue reading data on the stream until
// the end of the stream is reached so that data which has already been
// assigned to the stream will be processed.
//
// This method will return an error if there are no other live streams
// in the Session, or if SplitReadStream() has been called on the given
// Stream.
FinalizeStream(ctx context.Context, in *FinalizeStreamRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Splits a given read stream into two Streams. These streams are referred to
// as the primary and the residual of the split. The original stream can still
// be read from in the same manner as before. Both of the returned streams can
// also be read from, and the total rows returned by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back to back in the
// original Stream. Concretely, it is guaranteed that for streams Original,
// Primary, and Residual, that Original[0-j] = Primary[0-j] and
// Original[j-n] = Residual[0-m] once the streams have been read to
// completion.
//
// This method is guaranteed to be idempotent.
SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error)
}
BigQueryStorageClient is the client API for BigQueryStorage service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewBigQueryStorageClient
func NewBigQueryStorageClient(cc grpc.ClientConnInterface) BigQueryStorageClient
BigQueryStorageServer
type BigQueryStorageServer interface {
// Creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Read sessions automatically expire 6 hours after they are created and do
// not require manual clean-up by the caller.
CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
// Reads rows from the table in the format prescribed by the read session.
// Each response contains one or more table rows, up to a maximum of 10 MiB
// per response; read requests which attempt to read individual rows larger
// than this will fail.
//
// Each request also returns a set of stream statistics reflecting the
// estimated total number of rows in the read stream. This number is computed
// based on the total table size and the number of active streams in the read
// session, and may change as other streams continue to read data.
ReadRows(*ReadRowsRequest, BigQueryStorage_ReadRowsServer) error
// Creates additional streams for a ReadSession. This API can be used to
// dynamically adjust the parallelism of a batch processing task upwards by
// adding additional workers.
BatchCreateReadSessionStreams(context.Context, *BatchCreateReadSessionStreamsRequest) (*BatchCreateReadSessionStreamsResponse, error)
// Causes a single stream in a ReadSession to gracefully stop. This
// API can be used to dynamically adjust the parallelism of a batch processing
// task downwards without losing data.
//
// This API does not delete the stream -- it remains visible in the
// ReadSession, and any data processed by the stream is not released to other
// streams. However, no additional data will be assigned to the stream once
// this call completes. Callers must continue reading data on the stream until
// the end of the stream is reached so that data which has already been
// assigned to the stream will be processed.
//
// This method will return an error if there are no other live streams
// in the Session, or if SplitReadStream() has been called on the given
// Stream.
FinalizeStream(context.Context, *FinalizeStreamRequest) (*emptypb.Empty, error)
// Splits a given read stream into two Streams. These streams are referred to
// as the primary and the residual of the split. The original stream can still
// be read from in the same manner as before. Both of the returned streams can
// also be read from, and the total rows returned by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back to back in the
// original Stream. Concretely, it is guaranteed that for streams Original,
// Primary, and Residual, that Original[0-j] = Primary[0-j] and
// Original[j-n] = Residual[0-m] once the streams have been read to
// completion.
//
// This method is guaranteed to be idempotent.
SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
}
BigQueryStorageServer is the server API for BigQueryStorage service.
BigQueryStorage_ReadRowsClient
type BigQueryStorage_ReadRowsClient interface {
Recv() (*ReadRowsResponse, error)
grpc.ClientStream
}
BigQueryStorage_ReadRowsServer
type BigQueryStorage_ReadRowsServer interface {
Send(*ReadRowsResponse) error
grpc.ServerStream
}
CreateReadSessionRequest
type CreateReadSessionRequest struct {
TableReference *TableReference `protobuf:"bytes,1,opt,name=table_reference,json=tableReference,proto3" json:"table_reference,omitempty"`
Parent string `protobuf:"bytes,6,opt,name=parent,proto3" json:"parent,omitempty"`
TableModifiers *TableModifiers `protobuf:"bytes,2,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"`
RequestedStreams int32 `protobuf:"varint,3,opt,name=requested_streams,json=requestedStreams,proto3" json:"requested_streams,omitempty"`
ReadOptions *TableReadOptions `protobuf:"bytes,4,opt,name=read_options,json=readOptions,proto3" json:"read_options,omitempty"`
Format DataFormat `protobuf:"varint,5,opt,name=format,proto3,enum=google.cloud.bigquery.storage.v1beta1.DataFormat" json:"format,omitempty"`
ShardingStrategy ShardingStrategy "" /* 170 byte string literal not displayed */
}
Creates a new read session, which may include additional options such as requested parallelism, projection filters and constraints.
func (*CreateReadSessionRequest) Descriptor
func (*CreateReadSessionRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateReadSessionRequest.ProtoReflect.Descriptor instead.
func (*CreateReadSessionRequest) GetFormat
func (x *CreateReadSessionRequest) GetFormat() DataFormat
func (*CreateReadSessionRequest) GetParent
func (x *CreateReadSessionRequest) GetParent() string
func (*CreateReadSessionRequest) GetReadOptions
func (x *CreateReadSessionRequest) GetReadOptions() *TableReadOptions
func (*CreateReadSessionRequest) GetRequestedStreams
func (x *CreateReadSessionRequest) GetRequestedStreams() int32
func (*CreateReadSessionRequest) GetShardingStrategy
func (x *CreateReadSessionRequest) GetShardingStrategy() ShardingStrategy
func (*CreateReadSessionRequest) GetTableModifiers
func (x *CreateReadSessionRequest) GetTableModifiers() *TableModifiers
func (*CreateReadSessionRequest) GetTableReference
func (x *CreateReadSessionRequest) GetTableReference() *TableReference
func (*CreateReadSessionRequest) ProtoMessage
func (*CreateReadSessionRequest) ProtoMessage()
func (*CreateReadSessionRequest) ProtoReflect
func (x *CreateReadSessionRequest) ProtoReflect() protoreflect.Message
func (*CreateReadSessionRequest) Reset
func (x *CreateReadSessionRequest) Reset()
func (*CreateReadSessionRequest) String
func (x *CreateReadSessionRequest) String() string
DataFormat
type DataFormat int32
Data format for input or output data.
DataFormat_DATA_FORMAT_UNSPECIFIED, DataFormat_AVRO, DataFormat_ARROW
const (
// Data format is unspecified.
DataFormat_DATA_FORMAT_UNSPECIFIED DataFormat = 0
// Avro is a standard open source row based file format.
// See https://avro.apache.org/ for more details.
DataFormat_AVRO DataFormat = 1
// Arrow is a standard open source column-based message format.
// See https://arrow.apache.org/ for more details.
DataFormat_ARROW DataFormat = 3
)
func (DataFormat) Descriptor
func (DataFormat) Descriptor() protoreflect.EnumDescriptor
func (DataFormat) Enum
func (x DataFormat) Enum() *DataFormat
func (DataFormat) EnumDescriptor
func (DataFormat) EnumDescriptor() ([]byte, []int)
Deprecated: Use DataFormat.Descriptor instead.
func (DataFormat) Number
func (x DataFormat) Number() protoreflect.EnumNumber
func (DataFormat) String
func (x DataFormat) String() string
func (DataFormat) Type
func (DataFormat) Type() protoreflect.EnumType
FinalizeStreamRequest
type FinalizeStreamRequest struct {
// Required. Stream to finalize.
Stream *Stream `protobuf:"bytes,2,opt,name=stream,proto3" json:"stream,omitempty"`
// contains filtered or unexported fields
}
Request information for invoking FinalizeStream
.
func (*FinalizeStreamRequest) Descriptor
func (*FinalizeStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use FinalizeStreamRequest.ProtoReflect.Descriptor instead.
func (*FinalizeStreamRequest) GetStream
func (x *FinalizeStreamRequest) GetStream() *Stream
func (*FinalizeStreamRequest) ProtoMessage
func (*FinalizeStreamRequest) ProtoMessage()
func (*FinalizeStreamRequest) ProtoReflect
func (x *FinalizeStreamRequest) ProtoReflect() protoreflect.Message
func (*FinalizeStreamRequest) Reset
func (x *FinalizeStreamRequest) Reset()
func (*FinalizeStreamRequest) String
func (x *FinalizeStreamRequest) String() string
Progress
type Progress struct {
// The fraction of rows assigned to the stream that have been processed by the
// server so far, not including the rows in the current response message.
//
// This value, along with `at_response_end`, can be used to interpolate the
// progress made as the rows in the message are being processed using the
// following formula: `at_response_start + (at_response_end -
// at_response_start) * rows_processed_from_response / rows_in_response`.
//
// Note that if a filter is provided, the `at_response_end` value of the
// previous response may not necessarily be equal to the `at_response_start`
// value of the current response.
AtResponseStart float32 `protobuf:"fixed32,1,opt,name=at_response_start,json=atResponseStart,proto3" json:"at_response_start,omitempty"`
// Similar to `at_response_start`, except that this value includes the rows in
// the current response.
AtResponseEnd float32 `protobuf:"fixed32,2,opt,name=at_response_end,json=atResponseEnd,proto3" json:"at_response_end,omitempty"`
// contains filtered or unexported fields
}
func (*Progress) Descriptor
Deprecated: Use Progress.ProtoReflect.Descriptor instead.
func (*Progress) GetAtResponseEnd
func (*Progress) GetAtResponseStart
func (*Progress) ProtoMessage
func (*Progress) ProtoMessage()
func (*Progress) ProtoReflect
func (x *Progress) ProtoReflect() protoreflect.Message
func (*Progress) Reset
func (x *Progress) Reset()
func (*Progress) String
ReadRowsRequest
type ReadRowsRequest struct {
// Required. Identifier of the position in the stream to start reading from.
// The offset requested must be less than the last row read from ReadRows.
// Requesting a larger offset is undefined.
ReadPosition *StreamPosition `protobuf:"bytes,1,opt,name=read_position,json=readPosition,proto3" json:"read_position,omitempty"`
// contains filtered or unexported fields
}
Requesting row data via ReadRows
must provide Stream position information.
func (*ReadRowsRequest) Descriptor
func (*ReadRowsRequest) Descriptor() ([]byte, []int)
Deprecated: Use ReadRowsRequest.ProtoReflect.Descriptor instead.
func (*ReadRowsRequest) GetReadPosition
func (x *ReadRowsRequest) GetReadPosition() *StreamPosition
func (*ReadRowsRequest) ProtoMessage
func (*ReadRowsRequest) ProtoMessage()
func (*ReadRowsRequest) ProtoReflect
func (x *ReadRowsRequest) ProtoReflect() protoreflect.Message
func (*ReadRowsRequest) Reset
func (x *ReadRowsRequest) Reset()
func (*ReadRowsRequest) String
func (x *ReadRowsRequest) String() string
ReadRowsResponse
type ReadRowsResponse struct {
// Row data is returned in format specified during session creation.
//
// Types that are assignable to Rows:
//
// *ReadRowsResponse_AvroRows
// *ReadRowsResponse_ArrowRecordBatch
Rows isReadRowsResponse_Rows `protobuf_oneof:"rows"`
// Number of serialized rows in the rows block. This value is recorded here,
// in addition to the row_count values in the output-specific messages in
// `rows`, so that code which needs to record progress through the stream can
// do so in an output format-independent way.
RowCount int64 `protobuf:"varint,6,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
// Estimated stream statistics.
Status *StreamStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
// Throttling status. If unset, the latest response still describes
// the current throttling status.
ThrottleStatus *ThrottleStatus `protobuf:"bytes,5,opt,name=throttle_status,json=throttleStatus,proto3" json:"throttle_status,omitempty"`
// The schema for the read. If read_options.selected_fields is set, the
// schema may be different from the table schema as it will only contain
// the selected fields. This schema is equivalent to the one returned by
// CreateSession. This field is only populated in the first ReadRowsResponse
// RPC.
//
// Types that are assignable to Schema:
//
// *ReadRowsResponse_AvroSchema
// *ReadRowsResponse_ArrowSchema
Schema isReadRowsResponse_Schema `protobuf_oneof:"schema"`
// contains filtered or unexported fields
}
Response from calling ReadRows may include row data, progress and throttling information.
func (*ReadRowsResponse) Descriptor
func (*ReadRowsResponse) Descriptor() ([]byte, []int)
Deprecated: Use ReadRowsResponse.ProtoReflect.Descriptor instead.
func (*ReadRowsResponse) GetArrowRecordBatch
func (x *ReadRowsResponse) GetArrowRecordBatch() *ArrowRecordBatch
func (*ReadRowsResponse) GetArrowSchema
func (x *ReadRowsResponse) GetArrowSchema() *ArrowSchema
func (*ReadRowsResponse) GetAvroRows
func (x *ReadRowsResponse) GetAvroRows() *AvroRows
func (*ReadRowsResponse) GetAvroSchema
func (x *ReadRowsResponse) GetAvroSchema() *AvroSchema
func (*ReadRowsResponse) GetRowCount
func (x *ReadRowsResponse) GetRowCount() int64
func (*ReadRowsResponse) GetRows
func (m *ReadRowsResponse) GetRows() isReadRowsResponse_Rows
func (*ReadRowsResponse) GetSchema
func (m *ReadRowsResponse) GetSchema() isReadRowsResponse_Schema
func (*ReadRowsResponse) GetStatus
func (x *ReadRowsResponse) GetStatus() *StreamStatus
func (*ReadRowsResponse) GetThrottleStatus
func (x *ReadRowsResponse) GetThrottleStatus() *ThrottleStatus
func (*ReadRowsResponse) ProtoMessage
func (*ReadRowsResponse) ProtoMessage()
func (*ReadRowsResponse) ProtoReflect
func (x *ReadRowsResponse) ProtoReflect() protoreflect.Message
func (*ReadRowsResponse) Reset
func (x *ReadRowsResponse) Reset()
func (*ReadRowsResponse) String
func (x *ReadRowsResponse) String() string
ReadRowsResponse_ArrowRecordBatch
type ReadRowsResponse_ArrowRecordBatch struct {
// Serialized row data in Arrow RecordBatch format.
ArrowRecordBatch *ArrowRecordBatch `protobuf:"bytes,4,opt,name=arrow_record_batch,json=arrowRecordBatch,proto3,oneof"`
}
ReadRowsResponse_ArrowSchema
type ReadRowsResponse_ArrowSchema struct {
// Output only. Arrow schema.
ArrowSchema *ArrowSchema `protobuf:"bytes,8,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"`
}
ReadRowsResponse_AvroRows
type ReadRowsResponse_AvroRows struct {
// Serialized row data in AVRO format.
AvroRows *AvroRows `protobuf:"bytes,3,opt,name=avro_rows,json=avroRows,proto3,oneof"`
}
ReadRowsResponse_AvroSchema
type ReadRowsResponse_AvroSchema struct {
// Output only. Avro schema.
AvroSchema *AvroSchema `protobuf:"bytes,7,opt,name=avro_schema,json=avroSchema,proto3,oneof"`
}
ReadSession
type ReadSession struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
Schema isReadSession_Schema `protobuf_oneof:"schema"`
Streams []*Stream `protobuf:"bytes,4,rep,name=streams,proto3" json:"streams,omitempty"`
TableReference *TableReference `protobuf:"bytes,7,opt,name=table_reference,json=tableReference,proto3" json:"table_reference,omitempty"`
TableModifiers *TableModifiers `protobuf:"bytes,8,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"`
ShardingStrategy ShardingStrategy "" /* 170 byte string literal not displayed */
}
Information returned from a CreateReadSession
request.
func (*ReadSession) Descriptor
func (*ReadSession) Descriptor() ([]byte, []int)
Deprecated: Use ReadSession.ProtoReflect.Descriptor instead.
func (*ReadSession) GetArrowSchema
func (x *ReadSession) GetArrowSchema() *ArrowSchema
func (*ReadSession) GetAvroSchema
func (x *ReadSession) GetAvroSchema() *AvroSchema
func (*ReadSession) GetExpireTime
func (x *ReadSession) GetExpireTime() *timestamppb.Timestamp
func (*ReadSession) GetName
func (x *ReadSession) GetName() string
func (*ReadSession) GetSchema
func (m *ReadSession) GetSchema() isReadSession_Schema
func (*ReadSession) GetShardingStrategy
func (x *ReadSession) GetShardingStrategy() ShardingStrategy
func (*ReadSession) GetStreams
func (x *ReadSession) GetStreams() []*Stream
func (*ReadSession) GetTableModifiers
func (x *ReadSession) GetTableModifiers() *TableModifiers
func (*ReadSession) GetTableReference
func (x *ReadSession) GetTableReference() *TableReference
func (*ReadSession) ProtoMessage
func (*ReadSession) ProtoMessage()
func (*ReadSession) ProtoReflect
func (x *ReadSession) ProtoReflect() protoreflect.Message
func (*ReadSession) Reset
func (x *ReadSession) Reset()
func (*ReadSession) String
func (x *ReadSession) String() string
ReadSession_ArrowSchema
type ReadSession_ArrowSchema struct {
// Arrow schema.
ArrowSchema *ArrowSchema `protobuf:"bytes,6,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"`
}
ReadSession_AvroSchema
type ReadSession_AvroSchema struct {
// Avro schema.
AvroSchema *AvroSchema `protobuf:"bytes,5,opt,name=avro_schema,json=avroSchema,proto3,oneof"`
}
ShardingStrategy
type ShardingStrategy int32
Strategy for distributing data among multiple streams in a read session.
ShardingStrategy_SHARDING_STRATEGY_UNSPECIFIED, ShardingStrategy_LIQUID, ShardingStrategy_BALANCED
const (
// Same as LIQUID.
ShardingStrategy_SHARDING_STRATEGY_UNSPECIFIED ShardingStrategy = 0
// Assigns data to each stream based on the client's read rate. The faster the
// client reads from a stream, the more data is assigned to the stream. In
// this strategy, it's possible to read all data from a single stream even if
// there are other streams present.
ShardingStrategy_LIQUID ShardingStrategy = 1
// Assigns data to each stream such that roughly the same number of rows can
// be read from each stream. Because the server-side unit for assigning data
// is collections of rows, the API does not guarantee that each stream will
// return the same number of rows. Additionally, the limits are enforced based
// on the number of pre-filtering rows, so some filters can lead to lopsided
// assignments.
ShardingStrategy_BALANCED ShardingStrategy = 2
)
func (ShardingStrategy) Descriptor
func (ShardingStrategy) Descriptor() protoreflect.EnumDescriptor
func (ShardingStrategy) Enum
func (x ShardingStrategy) Enum() *ShardingStrategy
func (ShardingStrategy) EnumDescriptor
func (ShardingStrategy) EnumDescriptor() ([]byte, []int)
Deprecated: Use ShardingStrategy.Descriptor instead.
func (ShardingStrategy) Number
func (x ShardingStrategy) Number() protoreflect.EnumNumber
func (ShardingStrategy) String
func (x ShardingStrategy) String() string
func (ShardingStrategy) Type
func (ShardingStrategy) Type() protoreflect.EnumType
SplitReadStreamRequest
type SplitReadStreamRequest struct {
// Required. Stream to split.
OriginalStream *Stream `protobuf:"bytes,1,opt,name=original_stream,json=originalStream,proto3" json:"original_stream,omitempty"`
// A value in the range (0.0, 1.0) that specifies the fractional point at
// which the original stream should be split. The actual split point is
// evaluated on pre-filtered rows, so if a filter is provided, then there is
// no guarantee that the division of the rows between the new child streams
// will be proportional to this fractional value. Additionally, because the
// server-side unit for assigning data is collections of rows, this fraction
// will always map to a data storage boundary on the server side.
Fraction float32 `protobuf:"fixed32,2,opt,name=fraction,proto3" json:"fraction,omitempty"`
// contains filtered or unexported fields
}
Request information for SplitReadStream
.
func (*SplitReadStreamRequest) Descriptor
func (*SplitReadStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use SplitReadStreamRequest.ProtoReflect.Descriptor instead.
func (*SplitReadStreamRequest) GetFraction
func (x *SplitReadStreamRequest) GetFraction() float32
func (*SplitReadStreamRequest) GetOriginalStream
func (x *SplitReadStreamRequest) GetOriginalStream() *Stream
func (*SplitReadStreamRequest) ProtoMessage
func (*SplitReadStreamRequest) ProtoMessage()
func (*SplitReadStreamRequest) ProtoReflect
func (x *SplitReadStreamRequest) ProtoReflect() protoreflect.Message
func (*SplitReadStreamRequest) Reset
func (x *SplitReadStreamRequest) Reset()
func (*SplitReadStreamRequest) String
func (x *SplitReadStreamRequest) String() string
SplitReadStreamResponse
type SplitReadStreamResponse struct {
// Primary stream, which contains the beginning portion of
// |original_stream|. An empty value indicates that the original stream can no
// longer be split.
PrimaryStream *Stream `protobuf:"bytes,1,opt,name=primary_stream,json=primaryStream,proto3" json:"primary_stream,omitempty"`
// Remainder stream, which contains the tail of |original_stream|. An empty
// value indicates that the original stream can no longer be split.
RemainderStream *Stream `protobuf:"bytes,2,opt,name=remainder_stream,json=remainderStream,proto3" json:"remainder_stream,omitempty"`
// contains filtered or unexported fields
}
Response from SplitReadStream.
func (*SplitReadStreamResponse) Descriptor
func (*SplitReadStreamResponse) Descriptor() ([]byte, []int)
Deprecated: Use SplitReadStreamResponse.ProtoReflect.Descriptor instead.
func (*SplitReadStreamResponse) GetPrimaryStream
func (x *SplitReadStreamResponse) GetPrimaryStream() *Stream
func (*SplitReadStreamResponse) GetRemainderStream
func (x *SplitReadStreamResponse) GetRemainderStream() *Stream
func (*SplitReadStreamResponse) ProtoMessage
func (*SplitReadStreamResponse) ProtoMessage()
func (*SplitReadStreamResponse) ProtoReflect
func (x *SplitReadStreamResponse) ProtoReflect() protoreflect.Message
func (*SplitReadStreamResponse) Reset
func (x *SplitReadStreamResponse) Reset()
func (*SplitReadStreamResponse) String
func (x *SplitReadStreamResponse) String() string
Stream
type Stream struct {
// Name of the stream, in the form
// `projects/{project_id}/locations/{location}/streams/{stream_id}`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
Information about a single data stream within a read session.
func (*Stream) Descriptor
Deprecated: Use Stream.ProtoReflect.Descriptor instead.
func (*Stream) GetName
func (*Stream) ProtoMessage
func (*Stream) ProtoMessage()
func (*Stream) ProtoReflect
func (x *Stream) ProtoReflect() protoreflect.Message
func (*Stream) Reset
func (x *Stream) Reset()
func (*Stream) String
StreamPosition
type StreamPosition struct {
// Identifier for a given Stream.
Stream *Stream `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"`
// Position in the stream.
Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
// contains filtered or unexported fields
}
Expresses a point within a given stream using an offset position.
func (*StreamPosition) Descriptor
func (*StreamPosition) Descriptor() ([]byte, []int)
Deprecated: Use StreamPosition.ProtoReflect.Descriptor instead.
func (*StreamPosition) GetOffset
func (x *StreamPosition) GetOffset() int64
func (*StreamPosition) GetStream
func (x *StreamPosition) GetStream() *Stream
func (*StreamPosition) ProtoMessage
func (*StreamPosition) ProtoMessage()
func (*StreamPosition) ProtoReflect
func (x *StreamPosition) ProtoReflect() protoreflect.Message
func (*StreamPosition) Reset
func (x *StreamPosition) Reset()
func (*StreamPosition) String
func (x *StreamPosition) String() string
StreamStatus
type StreamStatus struct {
// Number of estimated rows in the current stream. May change over time as
// different readers in the stream progress at rates which are relatively fast
// or slow.
EstimatedRowCount int64 `protobuf:"varint,1,opt,name=estimated_row_count,json=estimatedRowCount,proto3" json:"estimated_row_count,omitempty"`
// A value in the range [0.0, 1.0] that represents the fraction of rows
// assigned to this stream that have been processed by the server. In the
// presence of read filters, the server may process more rows than it returns,
// so this value reflects progress through the pre-filtering rows.
//
// This value is only populated for sessions created through the BALANCED
// sharding strategy.
FractionConsumed float32 `protobuf:"fixed32,2,opt,name=fraction_consumed,json=fractionConsumed,proto3" json:"fraction_consumed,omitempty"`
// Represents the progress of the current stream.
Progress *Progress `protobuf:"bytes,4,opt,name=progress,proto3" json:"progress,omitempty"`
// Whether this stream can be split. For sessions that use the LIQUID sharding
// strategy, this value is always false. For BALANCED sessions, this value is
// false when enough data have been read such that no more splits are possible
// at that point or beyond. For small tables or streams that are the result of
// a chain of splits, this value may never be true.
IsSplittable bool `protobuf:"varint,3,opt,name=is_splittable,json=isSplittable,proto3" json:"is_splittable,omitempty"`
// contains filtered or unexported fields
}
Progress information for a given Stream.
func (*StreamStatus) Descriptor
func (*StreamStatus) Descriptor() ([]byte, []int)
Deprecated: Use StreamStatus.ProtoReflect.Descriptor instead.
func (*StreamStatus) GetEstimatedRowCount
func (x *StreamStatus) GetEstimatedRowCount() int64
func (*StreamStatus) GetFractionConsumed
func (x *StreamStatus) GetFractionConsumed() float32
func (*StreamStatus) GetIsSplittable
func (x *StreamStatus) GetIsSplittable() bool
func (*StreamStatus) GetProgress
func (x *StreamStatus) GetProgress() *Progress
func (*StreamStatus) ProtoMessage
func (*StreamStatus) ProtoMessage()
func (*StreamStatus) ProtoReflect
func (x *StreamStatus) ProtoReflect() protoreflect.Message
func (*StreamStatus) Reset
func (x *StreamStatus) Reset()
func (*StreamStatus) String
func (x *StreamStatus) String() string
TableModifiers
type TableModifiers struct {
// The snapshot time of the table. If not set, interpreted as now.
SnapshotTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"`
// contains filtered or unexported fields
}
All fields in this message are optional.
func (*TableModifiers) Descriptor
func (*TableModifiers) Descriptor() ([]byte, []int)
Deprecated: Use TableModifiers.ProtoReflect.Descriptor instead.
func (*TableModifiers) GetSnapshotTime
func (x *TableModifiers) GetSnapshotTime() *timestamppb.Timestamp
func (*TableModifiers) ProtoMessage
func (*TableModifiers) ProtoMessage()
func (*TableModifiers) ProtoReflect
func (x *TableModifiers) ProtoReflect() protoreflect.Message
func (*TableModifiers) Reset
func (x *TableModifiers) Reset()
func (*TableModifiers) String
func (x *TableModifiers) String() string
TableReadOptions
type TableReadOptions struct {
// Optional. The names of the fields in the table to be returned. If no
// field names are specified, then all fields in the table are returned.
//
// Nested fields -- the child elements of a STRUCT field -- can be selected
// individually using their fully-qualified names, and will be returned as
// record fields containing only the selected nested fields. If a STRUCT
// field is specified in the selected fields list, all of the child elements
// will be returned.
//
// As an example, consider a table with the following schema:
//
// {
// "name": "struct_field",
// "type": "RECORD",
// "mode": "NULLABLE",
// "fields": [
// {
// "name": "string_field1",
// "type": "STRING",
// "mode": "NULLABLE"
// },
// {
// "name": "string_field2",
// "type": "STRING",
// "mode": "NULLABLE"
// }
// ]
// }
//
// Specifying "struct_field" in the selected fields list will result in a
// read session schema with the following logical structure:
//
// struct_field {
// string_field1
// string_field2
// }
//
// Specifying "struct_field.string_field1" in the selected fields list will
// result in a read session schema with the following logical structure:
//
// struct_field {
// string_field1
// }
//
// The order of the fields in the read session schema is derived from the
// table schema and does not correspond to the order in which the fields are
// specified in this list.
SelectedFields []string `protobuf:"bytes,1,rep,name=selected_fields,json=selectedFields,proto3" json:"selected_fields,omitempty"`
// Optional. SQL text filtering statement, similar to a WHERE clause in
// a SQL query. Aggregates are not supported.
//
// Examples: "int_field > 5"
//
// "date_field = CAST('2014-9-27' as DATE)"
// "nullable_field is not NULL"
// "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
// "numeric_field BETWEEN 1.0 AND 5.0"
//
// Restricted to a maximum length of 1 MB.
RowRestriction string `protobuf:"bytes,2,opt,name=row_restriction,json=rowRestriction,proto3" json:"row_restriction,omitempty"`
// contains filtered or unexported fields
}
Options dictating how we read a table.
func (*TableReadOptions) Descriptor
func (*TableReadOptions) Descriptor() ([]byte, []int)
Deprecated: Use TableReadOptions.ProtoReflect.Descriptor instead.
func (*TableReadOptions) GetRowRestriction
func (x *TableReadOptions) GetRowRestriction() string
func (*TableReadOptions) GetSelectedFields
func (x *TableReadOptions) GetSelectedFields() []string
func (*TableReadOptions) ProtoMessage
func (*TableReadOptions) ProtoMessage()
func (*TableReadOptions) ProtoReflect
func (x *TableReadOptions) ProtoReflect() protoreflect.Message
func (*TableReadOptions) Reset
func (x *TableReadOptions) Reset()
func (*TableReadOptions) String
func (x *TableReadOptions) String() string
TableReference
type TableReference struct {
// The assigned project ID of the project.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// The ID of the dataset in the above project.
DatasetId string `protobuf:"bytes,2,opt,name=dataset_id,json=datasetId,proto3" json:"dataset_id,omitempty"`
// The ID of the table in the above dataset.
TableId string `protobuf:"bytes,3,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
// contains filtered or unexported fields
}
Table reference that includes just the 3 strings needed to identify a table.
func (*TableReference) Descriptor
func (*TableReference) Descriptor() ([]byte, []int)
Deprecated: Use TableReference.ProtoReflect.Descriptor instead.
func (*TableReference) GetDatasetId
func (x *TableReference) GetDatasetId() string
func (*TableReference) GetProjectId
func (x *TableReference) GetProjectId() string
func (*TableReference) GetTableId
func (x *TableReference) GetTableId() string
func (*TableReference) ProtoMessage
func (*TableReference) ProtoMessage()
func (*TableReference) ProtoReflect
func (x *TableReference) ProtoReflect() protoreflect.Message
func (*TableReference) Reset
func (x *TableReference) Reset()
func (*TableReference) String
func (x *TableReference) String() string
ThrottleStatus
type ThrottleStatus struct {
// How much this connection is being throttled.
// 0 is no throttling, 100 is completely throttled.
ThrottlePercent int32 `protobuf:"varint,1,opt,name=throttle_percent,json=throttlePercent,proto3" json:"throttle_percent,omitempty"`
// contains filtered or unexported fields
}
Information on whether the current connection is being throttled.
func (*ThrottleStatus) Descriptor
func (*ThrottleStatus) Descriptor() ([]byte, []int)
Deprecated: Use ThrottleStatus.ProtoReflect.Descriptor instead.
func (*ThrottleStatus) GetThrottlePercent
func (x *ThrottleStatus) GetThrottlePercent() int32
func (*ThrottleStatus) ProtoMessage
func (*ThrottleStatus) ProtoMessage()
func (*ThrottleStatus) ProtoReflect
func (x *ThrottleStatus) ProtoReflect() protoreflect.Message
func (*ThrottleStatus) Reset
func (x *ThrottleStatus) Reset()
func (*ThrottleStatus) String
func (x *ThrottleStatus) String() string
UnimplementedBigQueryStorageServer
type UnimplementedBigQueryStorageServer struct {
}
UnimplementedBigQueryStorageServer can be embedded to have forward compatible implementations.
func (*UnimplementedBigQueryStorageServer) BatchCreateReadSessionStreams
func (*UnimplementedBigQueryStorageServer) BatchCreateReadSessionStreams(context.Context, *BatchCreateReadSessionStreamsRequest) (*BatchCreateReadSessionStreamsResponse, error)
func (*UnimplementedBigQueryStorageServer) CreateReadSession
func (*UnimplementedBigQueryStorageServer) CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
func (*UnimplementedBigQueryStorageServer) FinalizeStream
func (*UnimplementedBigQueryStorageServer) FinalizeStream(context.Context, *FinalizeStreamRequest) (*emptypb.Empty, error)
func (*UnimplementedBigQueryStorageServer) ReadRows
func (*UnimplementedBigQueryStorageServer) ReadRows(*ReadRowsRequest, BigQueryStorage_ReadRowsServer) error
func (*UnimplementedBigQueryStorageServer) SplitReadStream
func (*UnimplementedBigQueryStorageServer) SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)