Package cloud.google.com/go/bigquery/storage/apiv1/storagepb (v1.51.1)

Variables

ArrowSerializationOptions_CompressionCodec_name, ArrowSerializationOptions_CompressionCodec_value

var (
	ArrowSerializationOptions_CompressionCodec_name = map[int32]string{
		0: "COMPRESSION_UNSPECIFIED",
		1: "LZ4_FRAME",
		2: "ZSTD",
	}
	ArrowSerializationOptions_CompressionCodec_value = map[string]int32{
		"COMPRESSION_UNSPECIFIED": 0,
		"LZ4_FRAME":               1,
		"ZSTD":                    2,
	}
)

Enum value maps for ArrowSerializationOptions_CompressionCodec.
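These paired maps convert between an enum's numeric wire value and its string label, which is handy when logging or when parsing a codec name from configuration. A minimal sketch of both directions:

import (
	"fmt"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

func codecLabels() {
	codec := storagepb.ArrowSerializationOptions_LZ4_FRAME
	// Numeric value to label.
	fmt.Println(storagepb.ArrowSerializationOptions_CompressionCodec_name[int32(codec)]) // "LZ4_FRAME"
	// Label to numeric value.
	fmt.Println(storagepb.ArrowSerializationOptions_CompressionCodec_value["ZSTD"]) // 2
}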

AppendRowsRequest_MissingValueInterpretation_name, AppendRowsRequest_MissingValueInterpretation_value

var (
	AppendRowsRequest_MissingValueInterpretation_name = map[int32]string{
		0: "MISSING_VALUE_INTERPRETATION_UNSPECIFIED",
		1: "NULL_VALUE",
		2: "DEFAULT_VALUE",
	}
	AppendRowsRequest_MissingValueInterpretation_value = map[string]int32{
		"MISSING_VALUE_INTERPRETATION_UNSPECIFIED": 0,
		"NULL_VALUE":    1,
		"DEFAULT_VALUE": 2,
	}
)

Enum value maps for AppendRowsRequest_MissingValueInterpretation.

StorageError_StorageErrorCode_name, StorageError_StorageErrorCode_value

var (
	StorageError_StorageErrorCode_name = map[int32]string{
		0: "STORAGE_ERROR_CODE_UNSPECIFIED",
		1: "TABLE_NOT_FOUND",
		2: "STREAM_ALREADY_COMMITTED",
		3: "STREAM_NOT_FOUND",
		4: "INVALID_STREAM_TYPE",
		5: "INVALID_STREAM_STATE",
		6: "STREAM_FINALIZED",
		7: "SCHEMA_MISMATCH_EXTRA_FIELDS",
		8: "OFFSET_ALREADY_EXISTS",
		9: "OFFSET_OUT_OF_RANGE",
	}
	StorageError_StorageErrorCode_value = map[string]int32{
		"STORAGE_ERROR_CODE_UNSPECIFIED": 0,
		"TABLE_NOT_FOUND":                1,
		"STREAM_ALREADY_COMMITTED":       2,
		"STREAM_NOT_FOUND":               3,
		"INVALID_STREAM_TYPE":            4,
		"INVALID_STREAM_STATE":           5,
		"STREAM_FINALIZED":               6,
		"SCHEMA_MISMATCH_EXTRA_FIELDS":   7,
		"OFFSET_ALREADY_EXISTS":          8,
		"OFFSET_OUT_OF_RANGE":            9,
	}
)

Enum value maps for StorageError_StorageErrorCode.

RowError_RowErrorCode_name, RowError_RowErrorCode_value

var (
	RowError_RowErrorCode_name = map[int32]string{
		0: "ROW_ERROR_CODE_UNSPECIFIED",
		1: "FIELDS_ERROR",
	}
	RowError_RowErrorCode_value = map[string]int32{
		"ROW_ERROR_CODE_UNSPECIFIED": 0,
		"FIELDS_ERROR":               1,
	}
)

Enum value maps for RowError_RowErrorCode.

DataFormat_name, DataFormat_value

var (
	DataFormat_name = map[int32]string{
		0: "DATA_FORMAT_UNSPECIFIED",
		1: "AVRO",
		2: "ARROW",
	}
	DataFormat_value = map[string]int32{
		"DATA_FORMAT_UNSPECIFIED": 0,
		"AVRO":                    1,
		"ARROW":                   2,
	}
)

Enum value maps for DataFormat.

WriteStreamView_name, WriteStreamView_value

var (
	WriteStreamView_name = map[int32]string{
		0: "WRITE_STREAM_VIEW_UNSPECIFIED",
		1: "BASIC",
		2: "FULL",
	}
	WriteStreamView_value = map[string]int32{
		"WRITE_STREAM_VIEW_UNSPECIFIED": 0,
		"BASIC":                         1,
		"FULL":                          2,
	}
)

Enum value maps for WriteStreamView.

WriteStream_Type_name, WriteStream_Type_value

var (
	WriteStream_Type_name = map[int32]string{
		0: "TYPE_UNSPECIFIED",
		1: "COMMITTED",
		2: "PENDING",
		3: "BUFFERED",
	}
	WriteStream_Type_value = map[string]int32{
		"TYPE_UNSPECIFIED": 0,
		"COMMITTED":        1,
		"PENDING":          2,
		"BUFFERED":         3,
	}
)

Enum value maps for WriteStream_Type.

WriteStream_WriteMode_name, WriteStream_WriteMode_value

var (
	WriteStream_WriteMode_name = map[int32]string{
		0: "WRITE_MODE_UNSPECIFIED",
		1: "INSERT",
	}
	WriteStream_WriteMode_value = map[string]int32{
		"WRITE_MODE_UNSPECIFIED": 0,
		"INSERT":                 1,
	}
)

Enum value maps for WriteStream_WriteMode.

TableFieldSchema_Type_name, TableFieldSchema_Type_value

var (
	TableFieldSchema_Type_name = map[int32]string{
		0:  "TYPE_UNSPECIFIED",
		1:  "STRING",
		2:  "INT64",
		3:  "DOUBLE",
		4:  "STRUCT",
		5:  "BYTES",
		6:  "BOOL",
		7:  "TIMESTAMP",
		8:  "DATE",
		9:  "TIME",
		10: "DATETIME",
		11: "GEOGRAPHY",
		12: "NUMERIC",
		13: "BIGNUMERIC",
		14: "INTERVAL",
		15: "JSON",
	}
	TableFieldSchema_Type_value = map[string]int32{
		"TYPE_UNSPECIFIED": 0,
		"STRING":           1,
		"INT64":            2,
		"DOUBLE":           3,
		"STRUCT":           4,
		"BYTES":            5,
		"BOOL":             6,
		"TIMESTAMP":        7,
		"DATE":             8,
		"TIME":             9,
		"DATETIME":         10,
		"GEOGRAPHY":        11,
		"NUMERIC":          12,
		"BIGNUMERIC":       13,
		"INTERVAL":         14,
		"JSON":             15,
	}
)

Enum value maps for TableFieldSchema_Type.

TableFieldSchema_Mode_name, TableFieldSchema_Mode_value

var (
	TableFieldSchema_Mode_name = map[int32]string{
		0: "MODE_UNSPECIFIED",
		1: "NULLABLE",
		2: "REQUIRED",
		3: "REPEATED",
	}
	TableFieldSchema_Mode_value = map[string]int32{
		"MODE_UNSPECIFIED": 0,
		"NULLABLE":         1,
		"REQUIRED":         2,
		"REPEATED":         3,
	}
)

Enum value maps for TableFieldSchema_Mode.

E_ColumnName

var (
	// Setting the column_name extension allows users to reference
	// a BigQuery column independently of the field name in the protocol buffer
	// message.
	//
	// The intended use of this annotation is to reference a destination column
	// named using characters unavailable for protobuf field names (e.g. unicode
	// characters).
	//
	// More details about BigQuery naming limitations can be found here:
	// https://cloud.google.com/bigquery/docs/schemas#column_names
	//
	// This extension is currently experimental.
	//
	// optional string column_name = 454943157;
	E_ColumnName = &file_google_cloud_bigquery_storage_v1_annotations_proto_extTypes[0]
)

Extension fields to descriptorpb.FieldOptions.
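A hedged sketch of reading the extension back from a field descriptor at runtime; columnName here is a helper of our own, not part of this package. It falls back to the proto field name when the extension is absent:

import (
	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// columnName returns the destination BigQuery column for a field: the
// column_name extension when set, otherwise the proto field name itself.
func columnName(fd protoreflect.FieldDescriptor) string {
	if opts, ok := fd.Options().(*descriptorpb.FieldOptions); ok && proto.HasExtension(opts, storagepb.E_ColumnName) {
		return proto.GetExtension(opts, storagepb.E_ColumnName).(string)
	}
	return string(fd.Name())
}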

File_google_cloud_bigquery_storage_v1_annotations_proto

var File_google_cloud_bigquery_storage_v1_annotations_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_arrow_proto

var File_google_cloud_bigquery_storage_v1_arrow_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_avro_proto

var File_google_cloud_bigquery_storage_v1_avro_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_protobuf_proto

var File_google_cloud_bigquery_storage_v1_protobuf_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_storage_proto

var File_google_cloud_bigquery_storage_v1_storage_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_stream_proto

var File_google_cloud_bigquery_storage_v1_stream_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_table_proto

var File_google_cloud_bigquery_storage_v1_table_proto protoreflect.FileDescriptor

Functions

func RegisterBigQueryReadServer

func RegisterBigQueryReadServer(s *grpc.Server, srv BigQueryReadServer)

func RegisterBigQueryWriteServer

func RegisterBigQueryWriteServer(s *grpc.Server, srv BigQueryWriteServer)

AppendRowsRequest

type AppendRowsRequest struct {
	WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`

	Offset *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"`

	Rows isAppendRowsRequest_Rows `protobuf_oneof:"rows"`

	TraceId string `protobuf:"bytes,6,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`

	MissingValueInterpretations map[string]AppendRowsRequest_MissingValueInterpretation "" /* 316 byte string literal not displayed */

}

Request message for AppendRows.

Due to the nature of AppendRows being a bidirectional streaming RPC, certain parts of the AppendRowsRequest need only be specified for the first request sent each time the gRPC network connection is opened/reopened.

A single AppendRowsRequest must be less than 10 MB in size. Requests larger than this return an error, typically INVALID_ARGUMENT.
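
A minimal sketch of the first request on a connection, which is the one that must carry the stream name and writer schema; descriptor and rowBytes are assumed inputs, and the stream and trace names are placeholders:

import (
	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/protobuf/types/descriptorpb"
)

func firstAppendRequest(descriptor *descriptorpb.DescriptorProto, rowBytes [][]byte) *storagepb.AppendRowsRequest {
	return &storagepb.AppendRowsRequest{
		WriteStream: "projects/p/datasets/d/tables/t/streams/_default", // placeholder
		TraceId:     "my-writer",                                       // placeholder
		Rows: &storagepb.AppendRowsRequest_ProtoRows{
			ProtoRows: &storagepb.AppendRowsRequest_ProtoData{
				// The schema only needs to accompany the first request on
				// the connection; later requests may omit it.
				WriterSchema: &storagepb.ProtoSchema{ProtoDescriptor: descriptor},
				Rows:         &storagepb.ProtoRows{SerializedRows: rowBytes},
			},
		},
	}
}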

func (*AppendRowsRequest) Descriptor

func (*AppendRowsRequest) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsRequest.ProtoReflect.Descriptor instead.

func (*AppendRowsRequest) GetMissingValueInterpretations

func (x *AppendRowsRequest) GetMissingValueInterpretations() map[string]AppendRowsRequest_MissingValueInterpretation

func (*AppendRowsRequest) GetOffset

func (x *AppendRowsRequest) GetOffset() *wrapperspb.Int64Value

func (*AppendRowsRequest) GetProtoRows

func (*AppendRowsRequest) GetRows

func (m *AppendRowsRequest) GetRows() isAppendRowsRequest_Rows

func (*AppendRowsRequest) GetTraceId

func (x *AppendRowsRequest) GetTraceId() string

func (*AppendRowsRequest) GetWriteStream

func (x *AppendRowsRequest) GetWriteStream() string

func (*AppendRowsRequest) ProtoMessage

func (*AppendRowsRequest) ProtoMessage()

func (*AppendRowsRequest) ProtoReflect

func (x *AppendRowsRequest) ProtoReflect() protoreflect.Message

func (*AppendRowsRequest) Reset

func (x *AppendRowsRequest) Reset()

func (*AppendRowsRequest) String

func (x *AppendRowsRequest) String() string

AppendRowsRequest_MissingValueInterpretation

type AppendRowsRequest_MissingValueInterpretation int32

An enum to indicate how to interpret missing values. Missing values are fields that are present in the user schema but missing in the rows. A missing value can represent a NULL or a column default value defined in the BigQuery table schema (see the sketch after the constants below).

AppendRowsRequest_MISSING_VALUE_INTERPRETATION_UNSPECIFIED, AppendRowsRequest_NULL_VALUE, AppendRowsRequest_DEFAULT_VALUE

const (
	// Invalid missing value interpretation. Requests with this value will be
	// rejected.
	AppendRowsRequest_MISSING_VALUE_INTERPRETATION_UNSPECIFIED AppendRowsRequest_MissingValueInterpretation = 0
	// Missing value is interpreted as NULL.
	AppendRowsRequest_NULL_VALUE AppendRowsRequest_MissingValueInterpretation = 1
	// Missing value is interpreted as column default value if declared in the
	// table schema, NULL otherwise.
	AppendRowsRequest_DEFAULT_VALUE AppendRowsRequest_MissingValueInterpretation = 2
)
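
A hedged sketch of opting columns into each interpretation on an AppendRowsRequest; the column names are placeholders:

// Assumes: import "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
func withMissingValueHandling(req *storagepb.AppendRowsRequest) {
	req.MissingValueInterpretations = map[string]storagepb.AppendRowsRequest_MissingValueInterpretation{
		"created_at": storagepb.AppendRowsRequest_DEFAULT_VALUE, // use the column default, NULL if none
		"comment":    storagepb.AppendRowsRequest_NULL_VALUE,    // always write NULL when missing
	}
}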

func (AppendRowsRequest_MissingValueInterpretation) Descriptor

func (AppendRowsRequest_MissingValueInterpretation) Enum

func (AppendRowsRequest_MissingValueInterpretation) EnumDescriptor

func (AppendRowsRequest_MissingValueInterpretation) EnumDescriptor() ([]byte, []int)

Deprecated: Use AppendRowsRequest_MissingValueInterpretation.Descriptor instead.

func (AppendRowsRequest_MissingValueInterpretation) Number

func (AppendRowsRequest_MissingValueInterpretation) String

func (AppendRowsRequest_MissingValueInterpretation) Type

AppendRowsRequest_ProtoData

type AppendRowsRequest_ProtoData struct {

	// Proto schema used to serialize the data.  This value only needs to be
	// provided as part of the first request on a gRPC network connection,
	// and will be ignored for subsequent requests on the connection.
	WriterSchema *ProtoSchema `protobuf:"bytes,1,opt,name=writer_schema,json=writerSchema,proto3" json:"writer_schema,omitempty"`
	// Serialized row data in protobuf message format.
	// Currently, the backend expects the serialized rows to adhere to
	// proto2 semantics when appending rows, particularly with respect to
	// how default values are encoded.
	Rows *ProtoRows `protobuf:"bytes,2,opt,name=rows,proto3" json:"rows,omitempty"`
	// contains filtered or unexported fields
}

ProtoData contains the data rows and schema when constructing append requests.

func (*AppendRowsRequest_ProtoData) Descriptor

func (*AppendRowsRequest_ProtoData) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsRequest_ProtoData.ProtoReflect.Descriptor instead.

func (*AppendRowsRequest_ProtoData) GetRows

func (x *AppendRowsRequest_ProtoData) GetRows() *ProtoRows

func (*AppendRowsRequest_ProtoData) GetWriterSchema

func (x *AppendRowsRequest_ProtoData) GetWriterSchema() *ProtoSchema

func (*AppendRowsRequest_ProtoData) ProtoMessage

func (*AppendRowsRequest_ProtoData) ProtoMessage()

func (*AppendRowsRequest_ProtoData) ProtoReflect

func (*AppendRowsRequest_ProtoData) Reset

func (x *AppendRowsRequest_ProtoData) Reset()

func (*AppendRowsRequest_ProtoData) String

func (x *AppendRowsRequest_ProtoData) String() string

AppendRowsRequest_ProtoRows

type AppendRowsRequest_ProtoRows struct {
	// Rows in proto format.
	ProtoRows *AppendRowsRequest_ProtoData `protobuf:"bytes,4,opt,name=proto_rows,json=protoRows,proto3,oneof"`
}

AppendRowsResponse

type AppendRowsResponse struct {

	// Types that are assignable to Response:
	//	*AppendRowsResponse_AppendResult_
	//	*AppendRowsResponse_Error
	Response isAppendRowsResponse_Response `protobuf_oneof:"response"`
	// If the backend detects a schema update, it is passed back to the user
	// so that the user can begin sending messages of the new type. It will
	// be empty when no schema updates have occurred.
	UpdatedSchema *TableSchema `protobuf:"bytes,3,opt,name=updated_schema,json=updatedSchema,proto3" json:"updated_schema,omitempty"`
	// If a request failed due to corrupted rows, no rows in the batch will be
	// appended. The API will return row level error info, so that the caller can
	// remove the bad rows and retry the request.
	RowErrors []*RowError `protobuf:"bytes,4,rep,name=row_errors,json=rowErrors,proto3" json:"row_errors,omitempty"`
	// The target of the append operation. Matches the write_stream in the
	// corresponding request.
	WriteStream string `protobuf:"bytes,5,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
	// contains filtered or unexported fields
}

Response message for AppendRows.
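
A minimal sketch of inspecting a response: a populated Error means no rows in the batch were appended and RowErrors points at the offending rows, while a successful append result may carry an offset (though not for default streams):

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

func handleAppendResponse(resp *storagepb.AppendRowsResponse) error {
	if st := resp.GetError(); st != nil {
		for _, re := range resp.GetRowErrors() {
			log.Printf("row %d rejected: %s", re.GetIndex(), re.GetMessage())
		}
		return fmt.Errorf("append failed: %s", st.GetMessage())
	}
	if off := resp.GetAppendResult().GetOffset(); off != nil {
		log.Printf("appended at offset %d", off.GetValue())
	}
	return nil
}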

func (*AppendRowsResponse) Descriptor

func (*AppendRowsResponse) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsResponse.ProtoReflect.Descriptor instead.

func (*AppendRowsResponse) GetAppendResult

func (*AppendRowsResponse) GetError

func (x *AppendRowsResponse) GetError() *status.Status

func (*AppendRowsResponse) GetResponse

func (m *AppendRowsResponse) GetResponse() isAppendRowsResponse_Response

func (*AppendRowsResponse) GetRowErrors

func (x *AppendRowsResponse) GetRowErrors() []*RowError

func (*AppendRowsResponse) GetUpdatedSchema

func (x *AppendRowsResponse) GetUpdatedSchema() *TableSchema

func (*AppendRowsResponse) GetWriteStream

func (x *AppendRowsResponse) GetWriteStream() string

func (*AppendRowsResponse) ProtoMessage

func (*AppendRowsResponse) ProtoMessage()

func (*AppendRowsResponse) ProtoReflect

func (x *AppendRowsResponse) ProtoReflect() protoreflect.Message

func (*AppendRowsResponse) Reset

func (x *AppendRowsResponse) Reset()

func (*AppendRowsResponse) String

func (x *AppendRowsResponse) String() string

AppendRowsResponse_AppendResult

type AppendRowsResponse_AppendResult struct {

	// The row offset at which the last append occurred. The offset will not be
	// set if appending using default streams.
	Offset *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=offset,proto3" json:"offset,omitempty"`
	// contains filtered or unexported fields
}

AppendResult is returned for successful append requests.

func (*AppendRowsResponse_AppendResult) Descriptor

func (*AppendRowsResponse_AppendResult) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsResponse_AppendResult.ProtoReflect.Descriptor instead.

func (*AppendRowsResponse_AppendResult) GetOffset

func (*AppendRowsResponse_AppendResult) ProtoMessage

func (*AppendRowsResponse_AppendResult) ProtoMessage()

func (*AppendRowsResponse_AppendResult) ProtoReflect

func (*AppendRowsResponse_AppendResult) Reset

func (*AppendRowsResponse_AppendResult) String

AppendRowsResponse_AppendResult_

type AppendRowsResponse_AppendResult_ struct {
	// Result if the append is successful.
	AppendResult *AppendRowsResponse_AppendResult `protobuf:"bytes,1,opt,name=append_result,json=appendResult,proto3,oneof"`
}

AppendRowsResponse_Error

type AppendRowsResponse_Error struct {
	// Error returned when problems were encountered.  If present,
	// it indicates rows were not accepted into the system.
	// Users can retry or continue with other append requests within the
	// same connection.
	//
	// Additional information about error signalling:
	//
	// ALREADY_EXISTS: Happens when an append specified an offset, and the
	// backend already has received data at this offset.  Typically encountered
	// in retry scenarios, and can be ignored.
	//
	// OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
	// the current end of the stream.
	//
	// INVALID_ARGUMENT: Indicates a malformed request or data.
	//
	// ABORTED: Request processing is aborted because of prior failures.  The
	// request can be retried if previous failure is addressed.
	//
	// INTERNAL: Indicates server side error(s) that can be retried.
	Error *status.Status `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
}

ArrowRecordBatch

type ArrowRecordBatch struct {

	// IPC-serialized Arrow RecordBatch.
	SerializedRecordBatch []byte `protobuf:"bytes,1,opt,name=serialized_record_batch,json=serializedRecordBatch,proto3" json:"serialized_record_batch,omitempty"`
	// [Deprecated] The count of rows in `serialized_record_batch`.
	// Please use the format-independent ReadRowsResponse.row_count instead.
	//
	// Deprecated: Marked as deprecated in google/cloud/bigquery/storage/v1/arrow.proto.
	RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// contains filtered or unexported fields
}

Arrow RecordBatch.

func (*ArrowRecordBatch) Descriptor

func (*ArrowRecordBatch) Descriptor() ([]byte, []int)

Deprecated: Use ArrowRecordBatch.ProtoReflect.Descriptor instead.

func (*ArrowRecordBatch) GetRowCount

func (x *ArrowRecordBatch) GetRowCount() int64

Deprecated: Marked as deprecated in google/cloud/bigquery/storage/v1/arrow.proto.

func (*ArrowRecordBatch) GetSerializedRecordBatch

func (x *ArrowRecordBatch) GetSerializedRecordBatch() []byte

func (*ArrowRecordBatch) ProtoMessage

func (*ArrowRecordBatch) ProtoMessage()

func (*ArrowRecordBatch) ProtoReflect

func (x *ArrowRecordBatch) ProtoReflect() protoreflect.Message

func (*ArrowRecordBatch) Reset

func (x *ArrowRecordBatch) Reset()

func (*ArrowRecordBatch) String

func (x *ArrowRecordBatch) String() string

ArrowSchema

type ArrowSchema struct {

	// IPC serialized Arrow schema.
	SerializedSchema []byte `protobuf:"bytes,1,opt,name=serialized_schema,json=serializedSchema,proto3" json:"serialized_schema,omitempty"`
	// contains filtered or unexported fields
}

Arrow schema as specified in https://arrow.apache.org/docs/python/api/datatypes.html and serialized to bytes using IPC: https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc

See the sketch below for one way this message can be deserialized.
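
A hedged sketch using the Apache Arrow Go module (import path shown for v12; use your module's version). BigQuery delivers the schema and each record batch as separate IPC payloads, so one approach is to concatenate the schema bytes with the batch bytes and read the result as a single IPC stream:

import (
	"bytes"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/ipc"
)

func decodeArrow(serializedSchema, serializedRecordBatch []byte) ([]arrow.Record, error) {
	buf := bytes.NewBuffer(serializedSchema)
	buf.Write(serializedRecordBatch)
	r, err := ipc.NewReader(buf)
	if err != nil {
		return nil, err
	}
	defer r.Release()
	var recs []arrow.Record
	for r.Next() {
		rec := r.Record()
		rec.Retain() // keep the record alive beyond the next call to Next
		recs = append(recs, rec)
	}
	return recs, r.Err()
}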

func (*ArrowSchema) Descriptor

func (*ArrowSchema) Descriptor() ([]byte, []int)

Deprecated: Use ArrowSchema.ProtoReflect.Descriptor instead.

func (*ArrowSchema) GetSerializedSchema

func (x *ArrowSchema) GetSerializedSchema() []byte

func (*ArrowSchema) ProtoMessage

func (*ArrowSchema) ProtoMessage()

func (*ArrowSchema) ProtoReflect

func (x *ArrowSchema) ProtoReflect() protoreflect.Message

func (*ArrowSchema) Reset

func (x *ArrowSchema) Reset()

func (*ArrowSchema) String

func (x *ArrowSchema) String() string

ArrowSerializationOptions

type ArrowSerializationOptions struct {
	BufferCompression ArrowSerializationOptions_CompressionCodec "" /* 194 byte string literal not displayed */

}

Contains options specific to Arrow Serialization.

func (*ArrowSerializationOptions) Descriptor

func (*ArrowSerializationOptions) Descriptor() ([]byte, []int)

Deprecated: Use ArrowSerializationOptions.ProtoReflect.Descriptor instead.

func (*ArrowSerializationOptions) GetBufferCompression

func (*ArrowSerializationOptions) ProtoMessage

func (*ArrowSerializationOptions) ProtoMessage()

func (*ArrowSerializationOptions) ProtoReflect

func (*ArrowSerializationOptions) Reset

func (x *ArrowSerializationOptions) Reset()

func (*ArrowSerializationOptions) String

func (x *ArrowSerializationOptions) String() string

ArrowSerializationOptions_CompressionCodec

type ArrowSerializationOptions_CompressionCodec int32

Compression codecs supported by Arrow.

ArrowSerializationOptions_COMPRESSION_UNSPECIFIED, ArrowSerializationOptions_LZ4_FRAME, ArrowSerializationOptions_ZSTD

const (
	// If unspecified, no compression will be used.
	ArrowSerializationOptions_COMPRESSION_UNSPECIFIED ArrowSerializationOptions_CompressionCodec = 0
	// LZ4 Frame (https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md)
	ArrowSerializationOptions_LZ4_FRAME ArrowSerializationOptions_CompressionCodec = 1
	// Zstandard compression.
	ArrowSerializationOptions_ZSTD ArrowSerializationOptions_CompressionCodec = 2
)

func (ArrowSerializationOptions_CompressionCodec) Descriptor

func (ArrowSerializationOptions_CompressionCodec) Enum

func (ArrowSerializationOptions_CompressionCodec) EnumDescriptor

func (ArrowSerializationOptions_CompressionCodec) EnumDescriptor() ([]byte, []int)

Deprecated: Use ArrowSerializationOptions_CompressionCodec.Descriptor instead.

func (ArrowSerializationOptions_CompressionCodec) Number

func (ArrowSerializationOptions_CompressionCodec) String

func (ArrowSerializationOptions_CompressionCodec) Type

AvroRows

type AvroRows struct {

	// Binary serialized rows in a block.
	SerializedBinaryRows []byte `protobuf:"bytes,1,opt,name=serialized_binary_rows,json=serializedBinaryRows,proto3" json:"serialized_binary_rows,omitempty"`
	// [Deprecated] The count of rows in the returning block.
	// Please use the format-independent ReadRowsResponse.row_count instead.
	//
	// Deprecated: Marked as deprecated in google/cloud/bigquery/storage/v1/avro.proto.
	RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// contains filtered or unexported fields
}

Avro rows.

func (*AvroRows) Descriptor

func (*AvroRows) Descriptor() ([]byte, []int)

Deprecated: Use AvroRows.ProtoReflect.Descriptor instead.

func (*AvroRows) GetRowCount

func (x *AvroRows) GetRowCount() int64

Deprecated: Marked as deprecated in google/cloud/bigquery/storage/v1/avro.proto.

func (*AvroRows) GetSerializedBinaryRows

func (x *AvroRows) GetSerializedBinaryRows() []byte

func (*AvroRows) ProtoMessage

func (*AvroRows) ProtoMessage()

func (*AvroRows) ProtoReflect

func (x *AvroRows) ProtoReflect() protoreflect.Message

func (*AvroRows) Reset

func (x *AvroRows) Reset()

func (*AvroRows) String

func (x *AvroRows) String() string

AvroSchema

type AvroSchema struct {

	// JSON-serialized schema, as described at
	// https://avro.apache.org/docs/1.8.1/spec.html.
	Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"`
	// contains filtered or unexported fields
}

Avro schema.

func (*AvroSchema) Descriptor

func (*AvroSchema) Descriptor() ([]byte, []int)

Deprecated: Use AvroSchema.ProtoReflect.Descriptor instead.

func (*AvroSchema) GetSchema

func (x *AvroSchema) GetSchema() string

func (*AvroSchema) ProtoMessage

func (*AvroSchema) ProtoMessage()

func (*AvroSchema) ProtoReflect

func (x *AvroSchema) ProtoReflect() protoreflect.Message

func (*AvroSchema) Reset

func (x *AvroSchema) Reset()

func (*AvroSchema) String

func (x *AvroSchema) String() string

AvroSerializationOptions

type AvroSerializationOptions struct {
	EnableDisplayNameAttribute bool "" /* 144 byte string literal not displayed */

}

Contains options specific to Avro Serialization.

func (*AvroSerializationOptions) Descriptor

func (*AvroSerializationOptions) Descriptor() ([]byte, []int)

Deprecated: Use AvroSerializationOptions.ProtoReflect.Descriptor instead.

func (*AvroSerializationOptions) GetEnableDisplayNameAttribute

func (x *AvroSerializationOptions) GetEnableDisplayNameAttribute() bool

func (*AvroSerializationOptions) ProtoMessage

func (*AvroSerializationOptions) ProtoMessage()

func (*AvroSerializationOptions) ProtoReflect

func (x *AvroSerializationOptions) ProtoReflect() protoreflect.Message

func (*AvroSerializationOptions) Reset

func (x *AvroSerializationOptions) Reset()

func (*AvroSerializationOptions) String

func (x *AvroSerializationOptions) String() string

BatchCommitWriteStreamsRequest

type BatchCommitWriteStreamsRequest struct {

	// Required. Parent table that all the streams should belong to, in the form
	// of `projects/{project}/datasets/{dataset}/tables/{table}`.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The group of streams that will be committed atomically.
	WriteStreams []string `protobuf:"bytes,2,rep,name=write_streams,json=writeStreams,proto3" json:"write_streams,omitempty"`
	// contains filtered or unexported fields
}

Request message for BatchCommitWriteStreams.

func (*BatchCommitWriteStreamsRequest) Descriptor

func (*BatchCommitWriteStreamsRequest) Descriptor() ([]byte, []int)

Deprecated: Use BatchCommitWriteStreamsRequest.ProtoReflect.Descriptor instead.

func (*BatchCommitWriteStreamsRequest) GetParent

func (x *BatchCommitWriteStreamsRequest) GetParent() string

func (*BatchCommitWriteStreamsRequest) GetWriteStreams

func (x *BatchCommitWriteStreamsRequest) GetWriteStreams() []string

func (*BatchCommitWriteStreamsRequest) ProtoMessage

func (*BatchCommitWriteStreamsRequest) ProtoMessage()

func (*BatchCommitWriteStreamsRequest) ProtoReflect

func (*BatchCommitWriteStreamsRequest) Reset

func (x *BatchCommitWriteStreamsRequest) Reset()

func (*BatchCommitWriteStreamsRequest) String

BatchCommitWriteStreamsResponse

type BatchCommitWriteStreamsResponse struct {

	// The time at which streams were committed, with microsecond granularity.
	// This field will only exist when there are no stream errors.
	// **Note** if this field is not set, the commit was not successful.
	CommitTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=commit_time,json=commitTime,proto3" json:"commit_time,omitempty"`
	// Stream level error if commit failed. Only streams with error will be in
	// the list.
	// If empty, there is no error and all streams are committed successfully.
	// If non-empty, certain streams have errors and zero streams are committed
	// due to the atomicity guarantee.
	StreamErrors []*StorageError `protobuf:"bytes,2,rep,name=stream_errors,json=streamErrors,proto3" json:"stream_errors,omitempty"`
	// contains filtered or unexported fields
}

Response message for BatchCommitWriteStreams.
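
A minimal sketch of acting on the response: because the commit is atomic, an unset commit_time means nothing was committed and stream_errors explains why:

import (
	"errors"
	"log"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

func checkCommit(resp *storagepb.BatchCommitWriteStreamsResponse) error {
	if resp.GetCommitTime() == nil {
		for _, se := range resp.GetStreamErrors() {
			log.Printf("stream %s: %s", se.GetEntity(), se.GetErrorMessage())
		}
		return errors.New("batch commit failed; no streams were committed")
	}
	log.Printf("all streams committed at %s", resp.GetCommitTime().AsTime())
	return nil
}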

func (*BatchCommitWriteStreamsResponse) Descriptor

func (*BatchCommitWriteStreamsResponse) Descriptor() ([]byte, []int)

Deprecated: Use BatchCommitWriteStreamsResponse.ProtoReflect.Descriptor instead.

func (*BatchCommitWriteStreamsResponse) GetCommitTime

func (*BatchCommitWriteStreamsResponse) GetStreamErrors

func (x *BatchCommitWriteStreamsResponse) GetStreamErrors() []*StorageError

func (*BatchCommitWriteStreamsResponse) ProtoMessage

func (*BatchCommitWriteStreamsResponse) ProtoMessage()

func (*BatchCommitWriteStreamsResponse) ProtoReflect

func (*BatchCommitWriteStreamsResponse) Reset

func (*BatchCommitWriteStreamsResponse) String

BigQueryReadClient

type BigQueryReadClient interface {
	// Creates a new read session. A read session divides the contents of a
	// BigQuery table into one or more streams, which can then be used to read
	// data from the table. The read session also specifies properties of the
	// data to be read, such as a list of columns or a push-down filter describing
	// the rows to be returned.
	//
	// A particular row can be read by at most one stream. When the caller has
	// reached the end of each stream in the session, then all the data in the
	// table has been read.
	//
	// Data is assigned to each stream such that roughly the same number of
	// rows can be read from each stream. Because the server-side unit for
	// assigning data is collections of rows, the API does not guarantee that
	// each stream will return the same number of rows. Additionally, the
	// limits are enforced based on the number of pre-filtered rows, so some
	// filters can lead to lopsided assignments.
	//
	// Read sessions automatically expire 6 hours after they are created and do
	// not require manual clean-up by the caller.
	CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error)
	// Reads rows from the stream in the format prescribed by the ReadSession.
	// Each response contains one or more table rows, up to a maximum of 100 MiB
	// per response; read requests which attempt to read individual rows larger
	// than 100 MiB will fail.
	//
	// Each request also returns a set of stream statistics reflecting the current
	// state of the stream.
	ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryRead_ReadRowsClient, error)
	// Splits a given `ReadStream` into two `ReadStream` objects. These
	// `ReadStream` objects are referred to as the primary and the residual
	// streams of the split. The original `ReadStream` can still be read from in
	// the same manner as before. Both of the returned `ReadStream` objects can
	// also be read from, and the rows returned by both child streams will be
	// the same as the rows read from the original stream.
	//
	// Moreover, the two child streams will be allocated back-to-back in the
	// original `ReadStream`. Concretely, it is guaranteed that for streams
	// original, primary, and residual, that original[0-j] = primary[0-j] and
	// original[j-n] = residual[0-m] once the streams have been read to
	// completion.
	SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error)
}

BigQueryReadClient is the client API for BigQueryRead service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewBigQueryReadClient

func NewBigQueryReadClient(cc grpc.ClientConnInterface) BigQueryReadClient
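
A hedged end-to-end sketch over a raw gRPC connection: create a session, then drain its first stream. The project and table names are placeholders, and real code would typically request several streams and read them in parallel:

import (
	"context"
	"io"
	"log"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/grpc"
)

func readTable(ctx context.Context, conn grpc.ClientConnInterface) error {
	client := storagepb.NewBigQueryReadClient(conn)
	session, err := client.CreateReadSession(ctx, &storagepb.CreateReadSessionRequest{
		Parent: "projects/my-project", // placeholder
		ReadSession: &storagepb.ReadSession{
			Table:      "projects/my-project/datasets/d/tables/t", // placeholder
			DataFormat: storagepb.DataFormat_ARROW,
		},
		MaxStreamCount: 1,
	})
	if err != nil {
		return err
	}
	if len(session.GetStreams()) == 0 {
		return nil // empty table: no streams were allocated
	}
	rows, err := client.ReadRows(ctx, &storagepb.ReadRowsRequest{
		ReadStream: session.GetStreams()[0].GetName(),
	})
	if err != nil {
		return err
	}
	for {
		resp, err := rows.Recv()
		if err == io.EOF {
			return nil // end of stream: all assigned rows have been read
		}
		if err != nil {
			return err
		}
		log.Printf("received %d rows", resp.GetRowCount())
	}
}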

BigQueryReadServer

type BigQueryReadServer interface {
	// Creates a new read session. A read session divides the contents of a
	// BigQuery table into one or more streams, which can then be used to read
	// data from the table. The read session also specifies properties of the
	// data to be read, such as a list of columns or a push-down filter describing
	// the rows to be returned.
	//
	// A particular row can be read by at most one stream. When the caller has
	// reached the end of each stream in the session, then all the data in the
	// table has been read.
	//
	// Data is assigned to each stream such that roughly the same number of
	// rows can be read from each stream. Because the server-side unit for
	// assigning data is collections of rows, the API does not guarantee that
	// each stream will return the same number of rows. Additionally, the
	// limits are enforced based on the number of pre-filtered rows, so some
	// filters can lead to lopsided assignments.
	//
	// Read sessions automatically expire 6 hours after they are created and do
	// not require manual clean-up by the caller.
	CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
	// Reads rows from the stream in the format prescribed by the ReadSession.
	// Each response contains one or more table rows, up to a maximum of 100 MiB
	// per response; read requests which attempt to read individual rows larger
	// than 100 MiB will fail.
	//
	// Each request also returns a set of stream statistics reflecting the current
	// state of the stream.
	ReadRows(*ReadRowsRequest, BigQueryRead_ReadRowsServer) error
	// Splits a given `ReadStream` into two `ReadStream` objects. These
	// `ReadStream` objects are referred to as the primary and the residual
	// streams of the split. The original `ReadStream` can still be read from in
	// the same manner as before. Both of the returned `ReadStream` objects can
	// also be read from, and the rows returned by both child streams will be
	// the same as the rows read from the original stream.
	//
	// Moreover, the two child streams will be allocated back-to-back in the
	// original `ReadStream`. Concretely, it is guaranteed that for streams
	// original, primary, and residual, that original[0-j] = primary[0-j] and
	// original[j-n] = residual[0-m] once the streams have been read to
	// completion.
	SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
}

BigQueryReadServer is the server API for BigQueryRead service.

BigQueryRead_ReadRowsClient

type BigQueryRead_ReadRowsClient interface {
	Recv() (*ReadRowsResponse, error)
	grpc.ClientStream
}

BigQueryRead_ReadRowsServer

type BigQueryRead_ReadRowsServer interface {
	Send(*ReadRowsResponse) error
	grpc.ServerStream
}

BigQueryWriteClient

type BigQueryWriteClient interface {
	// Creates a write stream to the given table.
	// Additionally, every table has a special stream named '_default'
	// to which data can be written. This stream doesn't need to be created using
	// CreateWriteStream. It is a stream that can be used simultaneously by any
	// number of clients. Data written to this stream is considered committed as
	// soon as an acknowledgement is received.
	CreateWriteStream(ctx context.Context, in *CreateWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error)
	// Appends data to the given stream.
	//
	// If `offset` is specified, the `offset` is checked against the end of
	// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
	// attempt is made to append to an offset beyond the current end of the stream
	// or `ALREADY_EXISTS` if user provides an `offset` that has already been
	// written to. User can retry with adjusted offset within the same RPC
	// connection. If `offset` is not specified, append happens at the end of the
	// stream.
	//
	// The response contains an optional offset at which the append
	// happened.  No offset information will be returned for appends to a
	// default stream.
	//
	// Responses are received in the same order in which requests are sent.
	// There will be one response for each successfully inserted request.  Responses
	// may optionally embed error information if the originating AppendRequest was
	// not successfully processed.
	//
	// The specifics of when successfully appended data is made visible to the
	// table are governed by the type of stream:
	//
	// * For COMMITTED streams (which includes the default stream), data is
	// visible immediately upon successful append.
	//
	// * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
	// rpc which advances a cursor to a newer offset in the stream.
	//
	// * For PENDING streams, data is not made visible until the stream itself is
	// finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
	// committed via the `BatchCommitWriteStreams` rpc.
	AppendRows(ctx context.Context, opts ...grpc.CallOption) (BigQueryWrite_AppendRowsClient, error)
	// Gets information about a write stream.
	GetWriteStream(ctx context.Context, in *GetWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error)
	// Finalize a write stream so that no new data can be appended to the
	// stream. Finalize is not supported on the '_default' stream.
	FinalizeWriteStream(ctx context.Context, in *FinalizeWriteStreamRequest, opts ...grpc.CallOption) (*FinalizeWriteStreamResponse, error)
	// Atomically commits a group of `PENDING` streams that belong to the same
	// `parent` table.
	//
	// Streams must be finalized before commit and cannot be committed multiple
	// times. Once a stream is committed, data in the stream becomes available
	// for read operations.
	BatchCommitWriteStreams(ctx context.Context, in *BatchCommitWriteStreamsRequest, opts ...grpc.CallOption) (*BatchCommitWriteStreamsResponse, error)
	// Flushes rows to a BUFFERED stream.
	//
	// If users are appending rows to a BUFFERED stream, a flush operation is
	// required in order for the rows to become available for reading. A
	// flush operation advances the flushed cursor of a BUFFERED stream up to
	// the offset specified in the request.
	//
	// Flush is not supported on the _default stream, since it is not BUFFERED.
	FlushRows(ctx context.Context, in *FlushRowsRequest, opts ...grpc.CallOption) (*FlushRowsResponse, error)
}

BigQueryWriteClient is the client API for BigQueryWrite service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewBigQueryWriteClient

func NewBigQueryWriteClient(cc grpc.ClientConnInterface) BigQueryWriteClient
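
A hedged sketch of a single append on the bidirectional stream; req could be built as in the AppendRowsRequest sketch above. Responses arrive in the order requests were sent:

import (
	"context"
	"fmt"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/grpc"
)

func appendOnce(ctx context.Context, conn grpc.ClientConnInterface, req *storagepb.AppendRowsRequest) error {
	client := storagepb.NewBigQueryWriteClient(conn)
	stream, err := client.AppendRows(ctx)
	if err != nil {
		return err
	}
	if err := stream.Send(req); err != nil {
		return err
	}
	resp, err := stream.Recv() // one response per request, in order
	if err != nil {
		return err
	}
	if st := resp.GetError(); st != nil {
		return fmt.Errorf("append rejected: %s", st.GetMessage())
	}
	return stream.CloseSend()
}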

BigQueryWriteServer

type BigQueryWriteServer interface {
	// Creates a write stream to the given table.
	// Additionally, every table has a special stream named '_default'
	// to which data can be written. This stream doesn't need to be created using
	// CreateWriteStream. It is a stream that can be used simultaneously by any
	// number of clients. Data written to this stream is considered committed as
	// soon as an acknowledgement is received.
	CreateWriteStream(context.Context, *CreateWriteStreamRequest) (*WriteStream, error)
	// Appends data to the given stream.
	//
	// If `offset` is specified, the `offset` is checked against the end of
	// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
	// attempt is made to append to an offset beyond the current end of the stream
	// or `ALREADY_EXISTS` if user provides an `offset` that has already been
	// written to. User can retry with adjusted offset within the same RPC
	// connection. If `offset` is not specified, append happens at the end of the
	// stream.
	//
	// The response contains an optional offset at which the append
	// happened.  No offset information will be returned for appends to a
	// default stream.
	//
	// Responses are received in the same order in which requests are sent.
	// There will be one response for each successfully inserted request.  Responses
	// may optionally embed error information if the originating AppendRequest was
	// not successfully processed.
	//
	// The specifics of when successfully appended data is made visible to the
	// table are governed by the type of stream:
	//
	// * For COMMITTED streams (which includes the default stream), data is
	// visible immediately upon successful append.
	//
	// * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
	// rpc which advances a cursor to a newer offset in the stream.
	//
	// * For PENDING streams, data is not made visible until the stream itself is
	// finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
	// committed via the `BatchCommitWriteStreams` rpc.
	AppendRows(BigQueryWrite_AppendRowsServer) error
	// Gets information about a write stream.
	GetWriteStream(context.Context, *GetWriteStreamRequest) (*WriteStream, error)
	// Finalize a write stream so that no new data can be appended to the
	// stream. Finalize is not supported on the '_default' stream.
	FinalizeWriteStream(context.Context, *FinalizeWriteStreamRequest) (*FinalizeWriteStreamResponse, error)
	// Atomically commits a group of `PENDING` streams that belong to the same
	// `parent` table.
	//
	// Streams must be finalized before commit and cannot be committed multiple
	// times. Once a stream is committed, data in the stream becomes available
	// for read operations.
	BatchCommitWriteStreams(context.Context, *BatchCommitWriteStreamsRequest) (*BatchCommitWriteStreamsResponse, error)
	// Flushes rows to a BUFFERED stream.
	//
	// If users are appending rows to a BUFFERED stream, a flush operation is
	// required in order for the rows to become available for reading. A
	// flush operation advances the flushed cursor of a BUFFERED stream up to
	// the offset specified in the request.
	//
	// Flush is not supported on the _default stream, since it is not BUFFERED.
	FlushRows(context.Context, *FlushRowsRequest) (*FlushRowsResponse, error)
}

BigQueryWriteServer is the server API for BigQueryWrite service.

BigQueryWrite_AppendRowsClient

type BigQueryWrite_AppendRowsClient interface {
	Send(*AppendRowsRequest) error
	Recv() (*AppendRowsResponse, error)
	grpc.ClientStream
}

BigQueryWrite_AppendRowsServer

type BigQueryWrite_AppendRowsServer interface {
	Send(*AppendRowsResponse) error
	Recv() (*AppendRowsRequest, error)
	grpc.ServerStream
}

CreateReadSessionRequest

type CreateReadSessionRequest struct {
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`

	ReadSession *ReadSession `protobuf:"bytes,2,opt,name=read_session,json=readSession,proto3" json:"read_session,omitempty"`

	MaxStreamCount int32 `protobuf:"varint,3,opt,name=max_stream_count,json=maxStreamCount,proto3" json:"max_stream_count,omitempty"`

	PreferredMinStreamCount int32 "" /* 135 byte string literal not displayed */

}

Request message for CreateReadSession.

func (*CreateReadSessionRequest) Descriptor

func (*CreateReadSessionRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateReadSessionRequest.ProtoReflect.Descriptor instead.

func (*CreateReadSessionRequest) GetMaxStreamCount

func (x *CreateReadSessionRequest) GetMaxStreamCount() int32

func (*CreateReadSessionRequest) GetParent

func (x *CreateReadSessionRequest) GetParent() string

func (*CreateReadSessionRequest) GetPreferredMinStreamCount

func (x *CreateReadSessionRequest) GetPreferredMinStreamCount() int32

func (*CreateReadSessionRequest) GetReadSession

func (x *CreateReadSessionRequest) GetReadSession() *ReadSession

func (*CreateReadSessionRequest) ProtoMessage

func (*CreateReadSessionRequest) ProtoMessage()

func (*CreateReadSessionRequest) ProtoReflect

func (x *CreateReadSessionRequest) ProtoReflect() protoreflect.Message

func (*CreateReadSessionRequest) Reset

func (x *CreateReadSessionRequest) Reset()

func (*CreateReadSessionRequest) String

func (x *CreateReadSessionRequest) String() string

CreateWriteStreamRequest

type CreateWriteStreamRequest struct {

	// Required. Reference to the table to which the stream belongs, in the format
	// of `projects/{project}/datasets/{dataset}/tables/{table}`.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. Stream to be created.
	WriteStream *WriteStream `protobuf:"bytes,2,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
	// contains filtered or unexported fields
}

Request message for CreateWriteStream.

func (*CreateWriteStreamRequest) Descriptor

func (*CreateWriteStreamRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateWriteStreamRequest.ProtoReflect.Descriptor instead.

func (*CreateWriteStreamRequest) GetParent

func (x *CreateWriteStreamRequest) GetParent() string

func (*CreateWriteStreamRequest) GetWriteStream

func (x *CreateWriteStreamRequest) GetWriteStream() *WriteStream

func (*CreateWriteStreamRequest) ProtoMessage

func (*CreateWriteStreamRequest) ProtoMessage()

func (*CreateWriteStreamRequest) ProtoReflect

func (x *CreateWriteStreamRequest) ProtoReflect() protoreflect.Message

func (*CreateWriteStreamRequest) Reset

func (x *CreateWriteStreamRequest) Reset()

func (*CreateWriteStreamRequest) String

func (x *CreateWriteStreamRequest) String() string

DataFormat

type DataFormat int32

Data format for input or output data.

DataFormat_DATA_FORMAT_UNSPECIFIED, DataFormat_AVRO, DataFormat_ARROW

const (
	// Data format is unspecified.
	DataFormat_DATA_FORMAT_UNSPECIFIED DataFormat = 0
	// Avro is a standard open source row-based file format.
	// See https://avro.apache.org/ for more details.
	DataFormat_AVRO DataFormat = 1
	// Arrow is a standard open source column-based message format.
	// See https://arrow.apache.org/ for more details.
	DataFormat_ARROW DataFormat = 2
)

func (DataFormat) Descriptor

func (DataFormat) Descriptor() protoreflect.EnumDescriptor

func (DataFormat) Enum

func (x DataFormat) Enum() *DataFormat

func (DataFormat) EnumDescriptor

func (DataFormat) EnumDescriptor() ([]byte, []int)

Deprecated: Use DataFormat.Descriptor instead.

func (DataFormat) Number

func (x DataFormat) Number() protoreflect.EnumNumber

func (DataFormat) String

func (x DataFormat) String() string

func (DataFormat) Type

FinalizeWriteStreamRequest

type FinalizeWriteStreamRequest struct {

	// Required. Name of the stream to finalize, in the form of
	// `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// contains filtered or unexported fields
}

Request message for invoking FinalizeWriteStream.

func (*FinalizeWriteStreamRequest) Descriptor

func (*FinalizeWriteStreamRequest) Descriptor() ([]byte, []int)

Deprecated: Use FinalizeWriteStreamRequest.ProtoReflect.Descriptor instead.

func (*FinalizeWriteStreamRequest) GetName

func (x *FinalizeWriteStreamRequest) GetName() string

func (*FinalizeWriteStreamRequest) ProtoMessage

func (*FinalizeWriteStreamRequest) ProtoMessage()

func (*FinalizeWriteStreamRequest) ProtoReflect

func (*FinalizeWriteStreamRequest) Reset

func (x *FinalizeWriteStreamRequest) Reset()

func (*FinalizeWriteStreamRequest) String

func (x *FinalizeWriteStreamRequest) String() string

FinalizeWriteStreamResponse

type FinalizeWriteStreamResponse struct {

	// Number of rows in the finalized stream.
	RowCount int64 `protobuf:"varint,1,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// contains filtered or unexported fields
}

Response message for FinalizeWriteStream.

func (*FinalizeWriteStreamResponse) Descriptor

func (*FinalizeWriteStreamResponse) Descriptor() ([]byte, []int)

Deprecated: Use FinalizeWriteStreamResponse.ProtoReflect.Descriptor instead.

func (*FinalizeWriteStreamResponse) GetRowCount

func (x *FinalizeWriteStreamResponse) GetRowCount() int64

func (*FinalizeWriteStreamResponse) ProtoMessage

func (*FinalizeWriteStreamResponse) ProtoMessage()

func (*FinalizeWriteStreamResponse) ProtoReflect

func (*FinalizeWriteStreamResponse) Reset

func (x *FinalizeWriteStreamResponse) Reset()

func (*FinalizeWriteStreamResponse) String

func (x *FinalizeWriteStreamResponse) String() string

FlushRowsRequest

type FlushRowsRequest struct {

	// Required. The stream that is the target of the flush operation.
	WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
	// Ending offset of the flush operation. Rows before this offset (including
	// this offset) will be flushed.
	Offset *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"`
	// contains filtered or unexported fields
}

Request message for FlushRows.
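
A minimal sketch of flushing a BUFFERED stream through a given offset; the stream name is a placeholder, and the wrapper type allows the offset to be omitted entirely:

import (
	"context"
	"log"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func flushThrough(ctx context.Context, client storagepb.BigQueryWriteClient, offset int64) error {
	resp, err := client.FlushRows(ctx, &storagepb.FlushRowsRequest{
		WriteStream: "projects/p/datasets/d/tables/t/streams/s", // placeholder
		Offset:      wrapperspb.Int64(offset),                   // rows 0..offset become readable
	})
	if err != nil {
		return err
	}
	log.Printf("rows up to and including offset %d are flushed", resp.GetOffset())
	return nil
}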

func (*FlushRowsRequest) Descriptor

func (*FlushRowsRequest) Descriptor() ([]byte, []int)

Deprecated: Use FlushRowsRequest.ProtoReflect.Descriptor instead.

func (*FlushRowsRequest) GetOffset

func (x *FlushRowsRequest) GetOffset() *wrapperspb.Int64Value

func (*FlushRowsRequest) GetWriteStream

func (x *FlushRowsRequest) GetWriteStream() string

func (*FlushRowsRequest) ProtoMessage

func (*FlushRowsRequest) ProtoMessage()

func (*FlushRowsRequest) ProtoReflect

func (x *FlushRowsRequest) ProtoReflect() protoreflect.Message

func (*FlushRowsRequest) Reset

func (x *FlushRowsRequest) Reset()

func (*FlushRowsRequest) String

func (x *FlushRowsRequest) String() string

FlushRowsResponse

type FlushRowsResponse struct {

	// The rows before this offset (including this offset) are flushed.
	Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
	// contains filtered or unexported fields
}

Response message for FlushRows.

func (*FlushRowsResponse) Descriptor

func (*FlushRowsResponse) Descriptor() ([]byte, []int)

Deprecated: Use FlushRowsResponse.ProtoReflect.Descriptor instead.

func (*FlushRowsResponse) GetOffset

func (x *FlushRowsResponse) GetOffset() int64

func (*FlushRowsResponse) ProtoMessage

func (*FlushRowsResponse) ProtoMessage()

func (*FlushRowsResponse) ProtoReflect

func (x *FlushRowsResponse) ProtoReflect() protoreflect.Message

func (*FlushRowsResponse) Reset

func (x *FlushRowsResponse) Reset()

func (*FlushRowsResponse) String

func (x *FlushRowsResponse) String() string

GetWriteStreamRequest

type GetWriteStreamRequest struct {

	// Required. Name of the stream to get, in the form of
	// `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Indicates whether to get a full or partial view of the WriteStream. If
	// not set, the returned view will be basic.
	View WriteStreamView `protobuf:"varint,3,opt,name=view,proto3,enum=google.cloud.bigquery.storage.v1.WriteStreamView" json:"view,omitempty"`
	// contains filtered or unexported fields
}

Request message for GetWriteStream.

func (*GetWriteStreamRequest) Descriptor

func (*GetWriteStreamRequest) Descriptor() ([]byte, []int)

Deprecated: Use GetWriteStreamRequest.ProtoReflect.Descriptor instead.

func (*GetWriteStreamRequest) GetName

func (x *GetWriteStreamRequest) GetName() string

func (*GetWriteStreamRequest) GetView

func (*GetWriteStreamRequest) ProtoMessage

func (*GetWriteStreamRequest) ProtoMessage()

func (*GetWriteStreamRequest) ProtoReflect

func (x *GetWriteStreamRequest) ProtoReflect() protoreflect.Message

func (*GetWriteStreamRequest) Reset

func (x *GetWriteStreamRequest) Reset()

func (*GetWriteStreamRequest) String

func (x *GetWriteStreamRequest) String() string

ProtoRows

type ProtoRows struct {

	// A sequence of rows serialized as a Protocol Buffer.
	//
	// See https://developers.google.com/protocol-buffers/docs/overview for more
	// information on deserializing this field.
	SerializedRows [][]byte `protobuf:"bytes,1,rep,name=serialized_rows,json=serializedRows,proto3" json:"serialized_rows,omitempty"`
	// contains filtered or unexported fields
}
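
ProtoRows carries each row as an independently serialized protocol buffer message. A minimal sketch of filling it, where the row messages are assumed to match the writer schema:

import (
	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/protobuf/proto"
)

func serializeRows(rows []proto.Message) (*storagepb.ProtoRows, error) {
	out := &storagepb.ProtoRows{}
	for _, row := range rows {
		b, err := proto.Marshal(row) // one entry per row
		if err != nil {
			return nil, err
		}
		out.SerializedRows = append(out.SerializedRows, b)
	}
	return out, nil
}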

func (*ProtoRows) Descriptor

func (*ProtoRows) Descriptor() ([]byte, []int)

Deprecated: Use ProtoRows.ProtoReflect.Descriptor instead.

func (*ProtoRows) GetSerializedRows

func (x *ProtoRows) GetSerializedRows() [][]byte

func (*ProtoRows) ProtoMessage

func (*ProtoRows) ProtoMessage()

func (*ProtoRows) ProtoReflect

func (x *ProtoRows) ProtoReflect() protoreflect.Message

func (*ProtoRows) Reset

func (x *ProtoRows) Reset()

func (*ProtoRows) String

func (x *ProtoRows) String() string

ProtoSchema

type ProtoSchema struct {

	// Descriptor for input message.  The provided descriptor must be self
	// contained, such that data rows sent can be fully decoded using only the
	// single descriptor.  For data rows that are compositions of multiple
	// independent messages, this means the descriptor may need to be transformed
	// to only use nested types:
	// https://developers.google.com/protocol-buffers/docs/proto#nested
	//
	// For additional information for how proto types and values map onto BigQuery
	// see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
	ProtoDescriptor *descriptorpb.DescriptorProto `protobuf:"bytes,1,opt,name=proto_descriptor,json=protoDescriptor,proto3" json:"proto_descriptor,omitempty"`
	// contains filtered or unexported fields
}

ProtoSchema describes the schema of the serialized protocol buffer data rows.
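
A hedged sketch of building a self-contained descriptor with the NormalizeDescriptor helper from the sibling managedwriter/adapt package, which rewrites cross-file dependencies as nested types; constructing the DescriptorProto by hand is equally valid:

import (
	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"cloud.google.com/go/bigquery/storage/managedwriter/adapt"
	"google.golang.org/protobuf/proto"
)

func schemaFor(row proto.Message) (*storagepb.ProtoSchema, error) {
	dp, err := adapt.NormalizeDescriptor(row.ProtoReflect().Descriptor())
	if err != nil {
		return nil, err
	}
	return &storagepb.ProtoSchema{ProtoDescriptor: dp}, nil
}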

func (*ProtoSchema) Descriptor

func (*ProtoSchema) Descriptor() ([]byte, []int)

Deprecated: Use ProtoSchema.ProtoReflect.Descriptor instead.

func (*ProtoSchema) GetProtoDescriptor

func (x *ProtoSchema) GetProtoDescriptor() *descriptorpb.DescriptorProto

func (*ProtoSchema) ProtoMessage

func (*ProtoSchema) ProtoMessage()

func (*ProtoSchema) ProtoReflect

func (x *ProtoSchema) ProtoReflect() protoreflect.Message

func (*ProtoSchema) Reset

func (x *ProtoSchema) Reset()

func (*ProtoSchema) String

func (x *ProtoSchema) String() string

ReadRowsRequest

type ReadRowsRequest struct {

	// Required. Stream to read rows from.
	ReadStream string `protobuf:"bytes,1,opt,name=read_stream,json=readStream,proto3" json:"read_stream,omitempty"`
	// The offset requested must be less than the last row read from Read.
	// Requesting a larger offset is undefined. If not specified, start reading
	// from offset zero.
	Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
	// contains filtered or unexported fields
}

Request message for ReadRows.

func (*ReadRowsRequest) Descriptor

func (*ReadRowsRequest) Descriptor() ([]byte, []int)

Deprecated: Use ReadRowsRequest.ProtoReflect.Descriptor instead.

func (*ReadRowsRequest) GetOffset

func (x *ReadRowsRequest) GetOffset() int64

func (*ReadRowsRequest) GetReadStream

func (x *ReadRowsRequest) GetReadStream() string

func (*ReadRowsRequest) ProtoMessage

func (*ReadRowsRequest) ProtoMessage()

func (*ReadRowsRequest) ProtoReflect

func (x *ReadRowsRequest) ProtoReflect() protoreflect.Message

func (*ReadRowsRequest) Reset

func (x *ReadRowsRequest) Reset()

func (*ReadRowsRequest) String

func (x *ReadRowsRequest) String() string

ReadRowsResponse

type ReadRowsResponse struct {

	// Row data is returned in the format specified during session creation.
	//
	// Types that are assignable to Rows:
	//	*ReadRowsResponse_AvroRows
	//	*ReadRowsResponse_ArrowRecordBatch
	Rows isReadRowsResponse_Rows `protobuf_oneof:"rows"`
	// Number of serialized rows in the rows block.
	RowCount int64 `protobuf:"varint,6,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// Statistics for the stream.
	Stats *StreamStats `protobuf:"bytes,2,opt,name=stats,proto3" json:"stats,omitempty"`
	// Throttling state. If unset, the latest response still describes
	// the current throttling status.
	ThrottleState *ThrottleState `protobuf:"bytes,5,opt,name=throttle_state,json=throttleState,proto3" json:"throttle_state,omitempty"`
	// The schema for the read. If read_options.selected_fields is set, the
	// schema may be different from the table schema as it will only contain
	// the selected fields. This schema is equivalent to the one returned by
	// CreateReadSession. This field is only populated in the first ReadRowsResponse
	// RPC.
	//
	// Types that are assignable to Schema:
	//	*ReadRowsResponse_AvroSchema
	//	*ReadRowsResponse_ArrowSchema
	Schema isReadRowsResponse_Schema `protobuf_oneof:"schema"`
	// contains filtered or unexported fields
}

Response from calling ReadRows may include row data, progress and throttling information.
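
A minimal sketch of dispatching on the Rows oneof; which arm is populated is fixed by the DataFormat chosen at session creation:

// Assumes: import "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
func rowPayload(resp *storagepb.ReadRowsResponse) []byte {
	switch rows := resp.GetRows().(type) {
	case *storagepb.ReadRowsResponse_ArrowRecordBatch:
		return rows.ArrowRecordBatch.GetSerializedRecordBatch()
	case *storagepb.ReadRowsResponse_AvroRows:
		return rows.AvroRows.GetSerializedBinaryRows()
	default:
		return nil
	}
}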

func (*ReadRowsResponse) Descriptor

func (*ReadRowsResponse) Descriptor() ([]byte, []int)

Deprecated: Use ReadRowsResponse.ProtoReflect.Descriptor instead.

func (*ReadRowsResponse) GetArrowRecordBatch

func (x *ReadRowsResponse) GetArrowRecordBatch() *ArrowRecordBatch

func (*ReadRowsResponse) GetArrowSchema

func (x *ReadRowsResponse) GetArrowSchema() *ArrowSchema

func (*ReadRowsResponse) GetAvroRows

func (x *ReadRowsResponse) GetAvroRows() *AvroRows

func (*ReadRowsResponse) GetAvroSchema

func (x *ReadRowsResponse) GetAvroSchema() *AvroSchema

func (*ReadRowsResponse) GetRowCount

func (x *ReadRowsResponse) GetRowCount() int64

func (*ReadRowsResponse) GetRows

func (m *ReadRowsResponse) GetRows() isReadRowsResponse_Rows

func (*ReadRowsResponse) GetSchema

func (m *ReadRowsResponse) GetSchema() isReadRowsResponse_Schema

func (*ReadRowsResponse) GetStats

func (x *ReadRowsResponse) GetStats() *StreamStats

func (*ReadRowsResponse) GetThrottleState

func (x *ReadRowsResponse) GetThrottleState() *ThrottleState

func (*ReadRowsResponse) ProtoMessage

func (*ReadRowsResponse) ProtoMessage()

func (*ReadRowsResponse) ProtoReflect

func (x *ReadRowsResponse) ProtoReflect() protoreflect.Message

func (*ReadRowsResponse) Reset

func (x *ReadRowsResponse) Reset()

func (*ReadRowsResponse) String

func (x *ReadRowsResponse) String() string

ReadRowsResponse_ArrowRecordBatch

type ReadRowsResponse_ArrowRecordBatch struct {
	// Serialized row data in Arrow RecordBatch format.
	ArrowRecordBatch *ArrowRecordBatch `protobuf:"bytes,4,opt,name=arrow_record_batch,json=arrowRecordBatch,proto3,oneof"`
}

ReadRowsResponse_ArrowSchema

type ReadRowsResponse_ArrowSchema struct {
	// Output only. Arrow schema.
	ArrowSchema *ArrowSchema `protobuf:"bytes,8,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"`
}

ReadRowsResponse_AvroRows

type ReadRowsResponse_AvroRows struct {
	// Serialized row data in AVRO format.
	AvroRows *AvroRows `protobuf:"bytes,3,opt,name=avro_rows,json=avroRows,proto3,oneof"`
}

ReadRowsResponse_AvroSchema

type ReadRowsResponse_AvroSchema struct {
	// Output only. Avro schema.
	AvroSchema *AvroSchema `protobuf:"bytes,7,opt,name=avro_schema,json=avroSchema,proto3,oneof"`
}

ReadSession

type ReadSession struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`

	ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`

	DataFormat DataFormat "" /* 141 byte string literal not displayed */

	Schema isReadSession_Schema `protobuf_oneof:"schema"`

	Table string `protobuf:"bytes,6,opt,name=table,proto3" json:"table,omitempty"`

	TableModifiers *ReadSession_TableModifiers `protobuf:"bytes,7,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"`

	ReadOptions *ReadSession_TableReadOptions `protobuf:"bytes,8,opt,name=read_options,json=readOptions,proto3" json:"read_options,omitempty"`

	Streams []*ReadStream `protobuf:"bytes,10,rep,name=streams,proto3" json:"streams,omitempty"`

	EstimatedTotalBytesScanned int64 `protobuf:"varint,12,opt,name=estimated_total_bytes_scanned,json=estimatedTotalBytesScanned,proto3" json:"estimated_total_bytes_scanned,omitempty"`

	EstimatedRowCount int64 `protobuf:"varint,14,opt,name=estimated_row_count,json=estimatedRowCount,proto3" json:"estimated_row_count,omitempty"`

	TraceId string `protobuf:"bytes,13,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
	// contains filtered or unexported fields
}

Information about the ReadSession.
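
The sketch below shows one plausible way to obtain a ReadSession via the parent package's BigQueryReadClient.CreateReadSession. The project and table paths are hypothetical placeholders.

package example

import (
	"context"
	"fmt"

	storage "cloud.google.com/go/bigquery/storage/apiv1"
	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// createSession opens an Arrow-format read session over an assumed table,
// asking the server for up to four parallel streams.
func createSession(ctx context.Context, client *storage.BigQueryReadClient) (*storagepb.ReadSession, error) {
	req := &storagepb.CreateReadSessionRequest{
		Parent: "projects/my-project", // hypothetical project
		ReadSession: &storagepb.ReadSession{
			Table:      "projects/my-project/datasets/my_dataset/tables/my_table", // hypothetical table
			DataFormat: storagepb.DataFormat_ARROW,
		},
		MaxStreamCount: 4,
	}
	session, err := client.CreateReadSession(ctx, req)
	if err != nil {
		return nil, err
	}
	fmt.Printf("session %s with %d streams\n", session.GetName(), len(session.GetStreams()))
	return session, nil
}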

func (*ReadSession) Descriptor

func (*ReadSession) Descriptor() ([]byte, []int)

Deprecated: Use ReadSession.ProtoReflect.Descriptor instead.

func (*ReadSession) GetArrowSchema

func (x *ReadSession) GetArrowSchema() *ArrowSchema

func (*ReadSession) GetAvroSchema

func (x *ReadSession) GetAvroSchema() *AvroSchema

func (*ReadSession) GetDataFormat

func (x *ReadSession) GetDataFormat() DataFormat

func (*ReadSession) GetEstimatedRowCount

func (x *ReadSession) GetEstimatedRowCount() int64

func (*ReadSession) GetEstimatedTotalBytesScanned

func (x *ReadSession) GetEstimatedTotalBytesScanned() int64

func (*ReadSession) GetExpireTime

func (x *ReadSession) GetExpireTime() *timestamppb.Timestamp

func (*ReadSession) GetName

func (x *ReadSession) GetName() string

func (*ReadSession) GetReadOptions

func (x *ReadSession) GetReadOptions() *ReadSession_TableReadOptions

func (*ReadSession) GetSchema

func (m *ReadSession) GetSchema() isReadSession_Schema

func (*ReadSession) GetStreams

func (x *ReadSession) GetStreams() []*ReadStream

func (*ReadSession) GetTable

func (x *ReadSession) GetTable() string

func (*ReadSession) GetTableModifiers

func (x *ReadSession) GetTableModifiers() *ReadSession_TableModifiers

func (*ReadSession) GetTraceId

func (x *ReadSession) GetTraceId() string

func (*ReadSession) ProtoMessage

func (*ReadSession) ProtoMessage()

func (*ReadSession) ProtoReflect

func (x *ReadSession) ProtoReflect() protoreflect.Message

func (*ReadSession) Reset

func (x *ReadSession) Reset()

func (*ReadSession) String

func (x *ReadSession) String() string

ReadSession_ArrowSchema

type ReadSession_ArrowSchema struct {
	// Output only. Arrow schema.
	ArrowSchema *ArrowSchema `protobuf:"bytes,5,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"`
}

ReadSession_AvroSchema

type ReadSession_AvroSchema struct {
	// Output only. Avro schema.
	AvroSchema *AvroSchema `protobuf:"bytes,4,opt,name=avro_schema,json=avroSchema,proto3,oneof"`
}

ReadSession_TableModifiers

type ReadSession_TableModifiers struct {

	// The snapshot time of the table. If not set, interpreted as now.
	SnapshotTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"`
	// contains filtered or unexported fields
}

Additional attributes when reading a table.
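
For illustration, a minimal sketch of building TableModifiers that pins a session to an earlier table snapshot; the helper name is hypothetical.

package example

import (
	"time"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// snapshotModifiers pins a read session to the table's state one hour ago.
func snapshotModifiers() *storagepb.ReadSession_TableModifiers {
	return &storagepb.ReadSession_TableModifiers{
		SnapshotTime: timestamppb.New(time.Now().Add(-time.Hour)),
	}
}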

func (*ReadSession_TableModifiers) Descriptor

func (*ReadSession_TableModifiers) Descriptor() ([]byte, []int)

Deprecated: Use ReadSession_TableModifiers.ProtoReflect.Descriptor instead.

func (*ReadSession_TableModifiers) GetSnapshotTime

func (x *ReadSession_TableModifiers) GetSnapshotTime() *timestamppb.Timestamp

func (*ReadSession_TableModifiers) ProtoMessage

func (*ReadSession_TableModifiers) ProtoMessage()

func (*ReadSession_TableModifiers) ProtoReflect

func (x *ReadSession_TableModifiers) ProtoReflect() protoreflect.Message

func (*ReadSession_TableModifiers) Reset

func (x *ReadSession_TableModifiers) Reset()

func (*ReadSession_TableModifiers) String

func (x *ReadSession_TableModifiers) String() string

ReadSession_TableReadOptions

type ReadSession_TableReadOptions struct {

	// Optional. The names of the fields in the table to be returned. If no
	// field names are specified, then all fields in the table are returned.
	//
	// Nested fields -- the child elements of a STRUCT field -- can be selected
	// individually using their fully-qualified names, and will be returned as
	// record fields containing only the selected nested fields. If a STRUCT
	// field is specified in the selected fields list, all of the child elements
	// will be returned.
	//
	// As an example, consider a table with the following schema:
	//
	//   {
	//       "name": "struct_field",
	//       "type": "RECORD",
	//       "mode": "NULLABLE",
	//       "fields": [
	//           {
	//               "name": "string_field1",
	//               "type": "STRING",
	//               "mode": "NULLABLE"
	//           },
	//           {
	//               "name": "string_field2",
	//               "type": "STRING",
	//               "mode": "NULLABLE"
	//           }
	//       ]
	//   }
	//
	// Specifying "struct_field" in the selected fields list will result in a
	// read session schema with the following logical structure:
	//
	//   struct_field {
	//       string_field1
	//       string_field2
	//   }
	//
	// Specifying "struct_field.string_field1" in the selected fields list will
	// result in a read session schema with the following logical structure:
	//
	//   struct_field {
	//       string_field1
	//   }
	//
	// The order of the fields in the read session schema is derived from the
	// table schema and does not correspond to the order in which the fields are
	// specified in this list.
	SelectedFields []string `protobuf:"bytes,1,rep,name=selected_fields,json=selectedFields,proto3" json:"selected_fields,omitempty"`
	// SQL text filtering statement, similar to a WHERE clause in a query.
	// Aggregates are not supported.
	//
	// Examples: "int_field > 5"
	//           "date_field = CAST('2014-9-27' as DATE)"
	//           "nullable_field is not NULL"
	//           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
	//           "numeric_field BETWEEN 1.0 AND 5.0"
	//
	// Restricted to a maximum length of 1 MB.
	RowRestriction string `protobuf:"bytes,2,opt,name=row_restriction,json=rowRestriction,proto3" json:"row_restriction,omitempty"`
	// Types that are assignable to OutputFormatSerializationOptions:
	//	*ReadSession_TableReadOptions_ArrowSerializationOptions
	//	*ReadSession_TableReadOptions_AvroSerializationOptions
	OutputFormatSerializationOptions isReadSession_TableReadOptions_OutputFormatSerializationOptions `protobuf_oneof:"output_format_serialization_options"`
	// contains filtered or unexported fields
}

Options dictating how we read a table.
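
A small sketch of constructing these options, reusing the field names from the schema example above; the helper name is hypothetical.

package example

import "cloud.google.com/go/bigquery/storage/apiv1/storagepb"

// projectedReadOptions selects one nested field and one top-level field,
// and filters rows server-side before they are sent back.
func projectedReadOptions() *storagepb.ReadSession_TableReadOptions {
	return &storagepb.ReadSession_TableReadOptions{
		SelectedFields: []string{"struct_field.string_field1", "int_field"},
		RowRestriction: "int_field > 5",
	}
}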

func (*ReadSession_TableReadOptions) Descriptor

func (*ReadSession_TableReadOptions) Descriptor() ([]byte, []int)

Deprecated: Use ReadSession_TableReadOptions.ProtoReflect.Descriptor instead.

func (*ReadSession_TableReadOptions) GetArrowSerializationOptions

func (x *ReadSession_TableReadOptions) GetArrowSerializationOptions() *ArrowSerializationOptions

func (*ReadSession_TableReadOptions) GetAvroSerializationOptions

func (x *ReadSession_TableReadOptions) GetAvroSerializationOptions() *AvroSerializationOptions

func (*ReadSession_TableReadOptions) GetOutputFormatSerializationOptions

func (m *ReadSession_TableReadOptions) GetOutputFormatSerializationOptions() isReadSession_TableReadOptions_OutputFormatSerializationOptions

func (*ReadSession_TableReadOptions) GetRowRestriction

func (x *ReadSession_TableReadOptions) GetRowRestriction() string

func (*ReadSession_TableReadOptions) GetSelectedFields

func (x *ReadSession_TableReadOptions) GetSelectedFields() []string

func (*ReadSession_TableReadOptions) ProtoMessage

func (*ReadSession_TableReadOptions) ProtoMessage()

func (*ReadSession_TableReadOptions) ProtoReflect

func (x *ReadSession_TableReadOptions) ProtoReflect() protoreflect.Message

func (*ReadSession_TableReadOptions) Reset

func (x *ReadSession_TableReadOptions) Reset()

func (*ReadSession_TableReadOptions) String

func (x *ReadSession_TableReadOptions) String() string

ReadSession_TableReadOptions_ArrowSerializationOptions

type ReadSession_TableReadOptions_ArrowSerializationOptions struct {
	// Optional. Options specific to the Apache Arrow output format.
	ArrowSerializationOptions *ArrowSerializationOptions `protobuf:"bytes,3,opt,name=arrow_serialization_options,json=arrowSerializationOptions,proto3,oneof"`
}

ReadSession_TableReadOptions_AvroSerializationOptions

type ReadSession_TableReadOptions_AvroSerializationOptions struct {
	// Optional. Options specific to the Apache Avro output format.
	AvroSerializationOptions *AvroSerializationOptions `protobuf:"bytes,4,opt,name=avro_serialization_options,json=avroSerializationOptions,proto3,oneof"`
}

ReadStream

type ReadStream struct {

	// Output only. Name of the stream, in the form
	// `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// contains filtered or unexported fields
}

Information about a single stream that gets data out of the storage system. Most of the information about ReadStream instances is aggregated, making ReadStream lightweight.

func (*ReadStream) Descriptor

func (*ReadStream) Descriptor() ([]byte, []int)

Deprecated: Use ReadStream.ProtoReflect.Descriptor instead.

func (*ReadStream) GetName

func (x *ReadStream) GetName() string

func (*ReadStream) ProtoMessage

func (*ReadStream) ProtoMessage()

func (*ReadStream) ProtoReflect

func (x *ReadStream) ProtoReflect() protoreflect.Message

func (*ReadStream) Reset

func (x *ReadStream) Reset()

func (*ReadStream) String

func (x *ReadStream) String() string

RowError

type RowError struct {

	// Index of the malformed row in the request.
	Index int64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
	// Structured error reason for a row error.
	Code RowError_RowErrorCode `protobuf:"varint,2,opt,name=code,proto3,enum=google.cloud.bigquery.storage.v1.RowError_RowErrorCode" json:"code,omitempty"`
	// Description of the issue encountered when processing the row.
	Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
	// contains filtered or unexported fields
}

A message that conveys row-level error information in a request.
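
As an illustration, a sketch that logs each RowError carried in an AppendRowsResponse from the write API; the helper name is hypothetical.

package example

import (
	"log"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// logRowErrors surfaces per-row failures from an AppendRows response.
func logRowErrors(resp *storagepb.AppendRowsResponse) {
	for _, re := range resp.GetRowErrors() {
		log.Printf("row %d failed (%s): %s", re.GetIndex(), re.GetCode(), re.GetMessage())
	}
}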

func (*RowError) Descriptor

func (*RowError) Descriptor() ([]byte, []int)

Deprecated: Use RowError.ProtoReflect.Descriptor instead.

func (*RowError) GetCode

func (x *RowError) GetCode() RowError_RowErrorCode

func (*RowError) GetIndex

func (x *RowError) GetIndex() int64

func (*RowError) GetMessage

func (x *RowError) GetMessage() string

func (*RowError) ProtoMessage

func (*RowError) ProtoMessage()

func (*RowError) ProtoReflect

func (x *RowError) ProtoReflect() protoreflect.Message

func (*RowError) Reset

func (x *RowError) Reset()

func (*RowError) String

func (x *RowError) String() string

RowError_RowErrorCode

type RowError_RowErrorCode int32

Error code for RowError.

RowError_ROW_ERROR_CODE_UNSPECIFIED, RowError_FIELDS_ERROR

const (
	// Default error.
	RowError_ROW_ERROR_CODE_UNSPECIFIED RowError_RowErrorCode = 0
	// One or more fields in the row have errors.
	RowError_FIELDS_ERROR RowError_RowErrorCode = 1
)

func (RowError_RowErrorCode) Descriptor

func (RowError_RowErrorCode) Descriptor() protoreflect.EnumDescriptor

func (RowError_RowErrorCode) Enum

func (x RowError_RowErrorCode) Enum() *RowError_RowErrorCode

func (RowError_RowErrorCode) EnumDescriptor

func (RowError_RowErrorCode) EnumDescriptor() ([]byte, []int)

Deprecated: Use RowError_RowErrorCode.Descriptor instead.

func (RowError_RowErrorCode) Number

func (x RowError_RowErrorCode) Number() protoreflect.EnumNumber

func (RowError_RowErrorCode) String

func (x RowError_RowErrorCode) String() string

func (RowError_RowErrorCode) Type

func (RowError_RowErrorCode) Type() protoreflect.EnumType

SplitReadStreamRequest

type SplitReadStreamRequest struct {

	// Required. Name of the stream to split.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// A value in the range (0.0, 1.0) that specifies the fractional point at
	// which the original stream should be split. The actual split point is
	// evaluated on pre-filtered rows, so if a filter is provided, then there is
	// no guarantee that the division of the rows between the new child streams
	// will be proportional to this fractional value. Additionally, because the
	// server-side unit for assigning data is collections of rows, this fraction
	// will always map to a data storage boundary on the server side.
	Fraction float64 `protobuf:"fixed64,2,opt,name=fraction,proto3" json:"fraction,omitempty"`
	// contains filtered or unexported fields
}

Request message for SplitReadStream.
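
A hedged sketch of issuing this request through the parent package's BigQueryReadClient. Fraction 0.5 merely suggests an even split, since the server snaps the split point to storage boundaries; the function name is a placeholder.

package example

import (
	"context"
	"fmt"

	storage "cloud.google.com/go/bigquery/storage/apiv1"
	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// splitStream asks the server to split a stream roughly in half and reports
// the resulting child streams.
func splitStream(ctx context.Context, client *storage.BigQueryReadClient, name string) error {
	resp, err := client.SplitReadStream(ctx, &storagepb.SplitReadStreamRequest{
		Name:     name,
		Fraction: 0.5,
	})
	if err != nil {
		return err
	}
	fmt.Printf("primary: %s remainder: %s\n",
		resp.GetPrimaryStream().GetName(), resp.GetRemainderStream().GetName())
	return nil
}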

func (*SplitReadStreamRequest) Descriptor