Variables
ArrowSerializationOptions_CompressionCodec_name, ArrowSerializationOptions_CompressionCodec_value
var (
ArrowSerializationOptions_CompressionCodec_name = map[int32]string{
0: "COMPRESSION_UNSPECIFIED",
1: "LZ4_FRAME",
2: "ZSTD",
}
ArrowSerializationOptions_CompressionCodec_value = map[string]int32{
"COMPRESSION_UNSPECIFIED": 0,
"LZ4_FRAME": 1,
"ZSTD": 2,
}
)
Enum value maps for ArrowSerializationOptions_CompressionCodec.
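These generated maps (one pair per enum in the package) convert between an enum's wire value and its symbolic name. A minimal sketch of a round trip, assuming the conventional storagepb import alias:

package main

import (
    "fmt"

    storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

func main() {
    // Wire value -> symbolic name.
    fmt.Println(storagepb.ArrowSerializationOptions_CompressionCodec_name[2]) // "ZSTD"

    // Symbolic name -> wire value, e.g. when reading a codec from configuration.
    v := storagepb.ArrowSerializationOptions_CompressionCodec_value["LZ4_FRAME"]
    codec := storagepb.ArrowSerializationOptions_CompressionCodec(v)
    fmt.Println(codec) // prints "LZ4_FRAME" via the generated String method
}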
AppendRowsRequest_MissingValueInterpretation_name, AppendRowsRequest_MissingValueInterpretation_value
var (
AppendRowsRequest_MissingValueInterpretation_name = map[int32]string{
0: "MISSING_VALUE_INTERPRETATION_UNSPECIFIED",
1: "NULL_VALUE",
2: "DEFAULT_VALUE",
}
AppendRowsRequest_MissingValueInterpretation_value = map[string]int32{
"MISSING_VALUE_INTERPRETATION_UNSPECIFIED": 0,
"NULL_VALUE": 1,
"DEFAULT_VALUE": 2,
}
)
Enum value maps for AppendRowsRequest_MissingValueInterpretation.
StorageError_StorageErrorCode_name, StorageError_StorageErrorCode_value
var (
StorageError_StorageErrorCode_name = map[int32]string{
0: "STORAGE_ERROR_CODE_UNSPECIFIED",
1: "TABLE_NOT_FOUND",
2: "STREAM_ALREADY_COMMITTED",
3: "STREAM_NOT_FOUND",
4: "INVALID_STREAM_TYPE",
5: "INVALID_STREAM_STATE",
6: "STREAM_FINALIZED",
7: "SCHEMA_MISMATCH_EXTRA_FIELDS",
8: "OFFSET_ALREADY_EXISTS",
9: "OFFSET_OUT_OF_RANGE",
10: "CMEK_NOT_PROVIDED",
11: "INVALID_CMEK_PROVIDED",
12: "CMEK_ENCRYPTION_ERROR",
13: "KMS_SERVICE_ERROR",
14: "KMS_PERMISSION_DENIED",
}
StorageError_StorageErrorCode_value = map[string]int32{
"STORAGE_ERROR_CODE_UNSPECIFIED": 0,
"TABLE_NOT_FOUND": 1,
"STREAM_ALREADY_COMMITTED": 2,
"STREAM_NOT_FOUND": 3,
"INVALID_STREAM_TYPE": 4,
"INVALID_STREAM_STATE": 5,
"STREAM_FINALIZED": 6,
"SCHEMA_MISMATCH_EXTRA_FIELDS": 7,
"OFFSET_ALREADY_EXISTS": 8,
"OFFSET_OUT_OF_RANGE": 9,
"CMEK_NOT_PROVIDED": 10,
"INVALID_CMEK_PROVIDED": 11,
"CMEK_ENCRYPTION_ERROR": 12,
"KMS_SERVICE_ERROR": 13,
"KMS_PERMISSION_DENIED": 14,
}
)
Enum value maps for StorageError_StorageErrorCode.
RowError_RowErrorCode_name, RowError_RowErrorCode_value
var (
RowError_RowErrorCode_name = map[int32]string{
0: "ROW_ERROR_CODE_UNSPECIFIED",
1: "FIELDS_ERROR",
}
RowError_RowErrorCode_value = map[string]int32{
"ROW_ERROR_CODE_UNSPECIFIED": 0,
"FIELDS_ERROR": 1,
}
)
Enum value maps for RowError_RowErrorCode.
DataFormat_name, DataFormat_value
var (
DataFormat_name = map[int32]string{
0: "DATA_FORMAT_UNSPECIFIED",
1: "AVRO",
2: "ARROW",
}
DataFormat_value = map[string]int32{
"DATA_FORMAT_UNSPECIFIED": 0,
"AVRO": 1,
"ARROW": 2,
}
)
Enum value maps for DataFormat.
WriteStreamView_name, WriteStreamView_value
var (
WriteStreamView_name = map[int32]string{
0: "WRITE_STREAM_VIEW_UNSPECIFIED",
1: "BASIC",
2: "FULL",
}
WriteStreamView_value = map[string]int32{
"WRITE_STREAM_VIEW_UNSPECIFIED": 0,
"BASIC": 1,
"FULL": 2,
}
)
Enum value maps for WriteStreamView.
ReadSession_TableReadOptions_ResponseCompressionCodec_name, ReadSession_TableReadOptions_ResponseCompressionCodec_value
var (
ReadSession_TableReadOptions_ResponseCompressionCodec_name = map[int32]string{
0: "RESPONSE_COMPRESSION_CODEC_UNSPECIFIED",
2: "RESPONSE_COMPRESSION_CODEC_LZ4",
}
ReadSession_TableReadOptions_ResponseCompressionCodec_value = map[string]int32{
"RESPONSE_COMPRESSION_CODEC_UNSPECIFIED": 0,
"RESPONSE_COMPRESSION_CODEC_LZ4": 2,
}
)
Enum value maps for ReadSession_TableReadOptions_ResponseCompressionCodec.
WriteStream_Type_name, WriteStream_Type_value
var (
WriteStream_Type_name = map[int32]string{
0: "TYPE_UNSPECIFIED",
1: "COMMITTED",
2: "PENDING",
3: "BUFFERED",
}
WriteStream_Type_value = map[string]int32{
"TYPE_UNSPECIFIED": 0,
"COMMITTED": 1,
"PENDING": 2,
"BUFFERED": 3,
}
)
Enum value maps for WriteStream_Type.
WriteStream_WriteMode_name, WriteStream_WriteMode_value
var (
WriteStream_WriteMode_name = map[int32]string{
0: "WRITE_MODE_UNSPECIFIED",
1: "INSERT",
}
WriteStream_WriteMode_value = map[string]int32{
"WRITE_MODE_UNSPECIFIED": 0,
"INSERT": 1,
}
)
Enum value maps for WriteStream_WriteMode.
TableFieldSchema_Type_name, TableFieldSchema_Type_value
var (
TableFieldSchema_Type_name = map[int32]string{
0: "TYPE_UNSPECIFIED",
1: "STRING",
2: "INT64",
3: "DOUBLE",
4: "STRUCT",
5: "BYTES",
6: "BOOL",
7: "TIMESTAMP",
8: "DATE",
9: "TIME",
10: "DATETIME",
11: "GEOGRAPHY",
12: "NUMERIC",
13: "BIGNUMERIC",
14: "INTERVAL",
15: "JSON",
16: "RANGE",
}
TableFieldSchema_Type_value = map[string]int32{
"TYPE_UNSPECIFIED": 0,
"STRING": 1,
"INT64": 2,
"DOUBLE": 3,
"STRUCT": 4,
"BYTES": 5,
"BOOL": 6,
"TIMESTAMP": 7,
"DATE": 8,
"TIME": 9,
"DATETIME": 10,
"GEOGRAPHY": 11,
"NUMERIC": 12,
"BIGNUMERIC": 13,
"INTERVAL": 14,
"JSON": 15,
"RANGE": 16,
}
)
Enum value maps for TableFieldSchema_Type.
TableFieldSchema_Mode_name, TableFieldSchema_Mode_value
var (
TableFieldSchema_Mode_name = map[int32]string{
0: "MODE_UNSPECIFIED",
1: "NULLABLE",
2: "REQUIRED",
3: "REPEATED",
}
TableFieldSchema_Mode_value = map[string]int32{
"MODE_UNSPECIFIED": 0,
"NULLABLE": 1,
"REQUIRED": 2,
"REPEATED": 3,
}
)
Enum value maps for TableFieldSchema_Mode.
E_ColumnName
var (
// Setting the column_name extension allows users to reference
// bigquery column independently of the field name in the protocol buffer
// message.
//
// The intended use of this annotation is to reference a destination column
// named using characters unavailable for protobuf field names (e.g. unicode
// characters).
//
// More details about BigQuery naming limitations can be found here:
// https://cloud.google.com/bigquery/docs/schemas#column_names
//
// This extension is currently experimental.
//
// optional string column_name = 454943157;
E_ColumnName = &file_google_cloud_bigquery_storage_v1_annotations_proto_extTypes[0]
)
Extension fields to descriptorpb.FieldOptions.
File_google_cloud_bigquery_storage_v1_annotations_proto
var File_google_cloud_bigquery_storage_v1_annotations_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1_arrow_proto
var File_google_cloud_bigquery_storage_v1_arrow_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1_avro_proto
var File_google_cloud_bigquery_storage_v1_avro_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1_protobuf_proto
var File_google_cloud_bigquery_storage_v1_protobuf_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1_storage_proto
var File_google_cloud_bigquery_storage_v1_storage_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1_stream_proto
var File_google_cloud_bigquery_storage_v1_stream_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1_table_proto
var File_google_cloud_bigquery_storage_v1_table_proto protoreflect.FileDescriptor
Functions
func RegisterBigQueryReadServer
func RegisterBigQueryReadServer(s *grpc.Server, srv BigQueryReadServer)
func RegisterBigQueryWriteServer
func RegisterBigQueryWriteServer(s *grpc.Server, srv BigQueryWriteServer)
AppendRowsRequest
type AppendRowsRequest struct {
WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
Offset *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"`
Rows isAppendRowsRequest_Rows `protobuf_oneof:"rows"`
TraceId string `protobuf:"bytes,6,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
MissingValueInterpretations map[string]AppendRowsRequest_MissingValueInterpretation "" /* 316 byte string literal not displayed */
DefaultMissingValueInterpretation AppendRowsRequest_MissingValueInterpretation "" /* 248 byte string literal not displayed */
}
Request message for AppendRows.
Because AppendRows is a bidirectional streaming RPC, certain parts of the AppendRowsRequest need only be specified for the first request before switching table destinations. You can also switch table destinations within the same connection for the default stream.
The size of a single AppendRowsRequest must be less than 10 MB. Requests larger than this return an error, typically INVALID_ARGUMENT.
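As an illustration of how these fields fit together, the sketch below builds the first request of a connection for an explicit stream; streamName, desc, and rows are hypothetical inputs supplied by the caller:

import (
    storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
    "google.golang.org/protobuf/types/descriptorpb"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

// firstRequest carries the stream name and writer schema; subsequent requests
// on the same connection may carry only rows.
func firstRequest(streamName string, desc *descriptorpb.DescriptorProto, rows [][]byte) *storagepb.AppendRowsRequest {
    return &storagepb.AppendRowsRequest{
        WriteStream: streamName,
        Offset:      wrapperspb.Int64(0), // optional; omit when using the default stream
        Rows: &storagepb.AppendRowsRequest_ProtoRows{
            ProtoRows: &storagepb.AppendRowsRequest_ProtoData{
                WriterSchema: &storagepb.ProtoSchema{ProtoDescriptor: desc},
                Rows:         &storagepb.ProtoRows{SerializedRows: rows},
            },
        },
    }
}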
func (*AppendRowsRequest) Descriptor
func (*AppendRowsRequest) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsRequest.ProtoReflect.Descriptor instead.
func (*AppendRowsRequest) GetArrowRows
func (x *AppendRowsRequest) GetArrowRows() *AppendRowsRequest_ArrowData
func (*AppendRowsRequest) GetDefaultMissingValueInterpretation
func (x *AppendRowsRequest) GetDefaultMissingValueInterpretation() AppendRowsRequest_MissingValueInterpretation
func (*AppendRowsRequest) GetMissingValueInterpretations
func (x *AppendRowsRequest) GetMissingValueInterpretations() map[string]AppendRowsRequest_MissingValueInterpretation
func (*AppendRowsRequest) GetOffset
func (x *AppendRowsRequest) GetOffset() *wrapperspb.Int64Value
func (*AppendRowsRequest) GetProtoRows
func (x *AppendRowsRequest) GetProtoRows() *AppendRowsRequest_ProtoData
func (*AppendRowsRequest) GetRows
func (m *AppendRowsRequest) GetRows() isAppendRowsRequest_Rows
func (*AppendRowsRequest) GetTraceId
func (x *AppendRowsRequest) GetTraceId() string
func (*AppendRowsRequest) GetWriteStream
func (x *AppendRowsRequest) GetWriteStream() string
func (*AppendRowsRequest) ProtoMessage
func (*AppendRowsRequest) ProtoMessage()
func (*AppendRowsRequest) ProtoReflect
func (x *AppendRowsRequest) ProtoReflect() protoreflect.Message
func (*AppendRowsRequest) Reset
func (x *AppendRowsRequest) Reset()
func (*AppendRowsRequest) String
func (x *AppendRowsRequest) String() string
AppendRowsRequest_ArrowData
type AppendRowsRequest_ArrowData struct {
// Optional. Arrow Schema used to serialize the data.
WriterSchema *ArrowSchema `protobuf:"bytes,1,opt,name=writer_schema,json=writerSchema,proto3" json:"writer_schema,omitempty"`
// Required. Serialized row data in Arrow format.
Rows *ArrowRecordBatch `protobuf:"bytes,2,opt,name=rows,proto3" json:"rows,omitempty"`
// contains filtered or unexported fields
}
Arrow schema and data. Arrow format is an experimental feature available only to allowlisted customers.
func (*AppendRowsRequest_ArrowData) Descriptor
func (*AppendRowsRequest_ArrowData) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsRequest_ArrowData.ProtoReflect.Descriptor instead.
func (*AppendRowsRequest_ArrowData) GetRows
func (x *AppendRowsRequest_ArrowData) GetRows() *ArrowRecordBatch
func (*AppendRowsRequest_ArrowData) GetWriterSchema
func (x *AppendRowsRequest_ArrowData) GetWriterSchema() *ArrowSchema
func (*AppendRowsRequest_ArrowData) ProtoMessage
func (*AppendRowsRequest_ArrowData) ProtoMessage()
func (*AppendRowsRequest_ArrowData) ProtoReflect
func (x *AppendRowsRequest_ArrowData) ProtoReflect() protoreflect.Message
func (*AppendRowsRequest_ArrowData) Reset
func (x *AppendRowsRequest_ArrowData) Reset()
func (*AppendRowsRequest_ArrowData) String
func (x *AppendRowsRequest_ArrowData) String() string
AppendRowsRequest_ArrowRows
type AppendRowsRequest_ArrowRows struct {
// Rows in Arrow format. This is an experimental feature available only to
// allowlisted customers.
ArrowRows *AppendRowsRequest_ArrowData `protobuf:"bytes,5,opt,name=arrow_rows,json=arrowRows,proto3,oneof"`
}
AppendRowsRequest_MissingValueInterpretation
type AppendRowsRequest_MissingValueInterpretation int32
An enum to indicate how to interpret missing values of fields that are present in the user schema but missing in rows. A missing value can represent a NULL or a column default value defined in the BigQuery table schema.
AppendRowsRequest_MISSING_VALUE_INTERPRETATION_UNSPECIFIED, AppendRowsRequest_NULL_VALUE, AppendRowsRequest_DEFAULT_VALUE
const (
// Invalid missing value interpretation. Requests with this value will be
// rejected.
AppendRowsRequest_MISSING_VALUE_INTERPRETATION_UNSPECIFIED AppendRowsRequest_MissingValueInterpretation = 0
// Missing value is interpreted as NULL.
AppendRowsRequest_NULL_VALUE AppendRowsRequest_MissingValueInterpretation = 1
// Missing value is interpreted as column default value if declared in the
// table schema, NULL otherwise.
AppendRowsRequest_DEFAULT_VALUE AppendRowsRequest_MissingValueInterpretation = 2
)
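A short sketch of wiring these values into a request; the "created_at" column name is hypothetical:

import (
    storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// withMissingValueHandling makes a missing "created_at" column take its
// table-defined default, while any other missing column becomes NULL.
func withMissingValueHandling(req *storagepb.AppendRowsRequest) *storagepb.AppendRowsRequest {
    req.MissingValueInterpretations = map[string]storagepb.AppendRowsRequest_MissingValueInterpretation{
        "created_at": storagepb.AppendRowsRequest_DEFAULT_VALUE,
    }
    req.DefaultMissingValueInterpretation = storagepb.AppendRowsRequest_NULL_VALUE
    return req
}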
func (AppendRowsRequest_MissingValueInterpretation) Descriptor
func (AppendRowsRequest_MissingValueInterpretation) Descriptor() protoreflect.EnumDescriptor
func (AppendRowsRequest_MissingValueInterpretation) Enum
func (x AppendRowsRequest_MissingValueInterpretation) Enum() *AppendRowsRequest_MissingValueInterpretation
func (AppendRowsRequest_MissingValueInterpretation) EnumDescriptor
func (AppendRowsRequest_MissingValueInterpretation) EnumDescriptor() ([]byte, []int)
Deprecated: Use AppendRowsRequest_MissingValueInterpretation.Descriptor instead.
func (AppendRowsRequest_MissingValueInterpretation) Number
func (x AppendRowsRequest_MissingValueInterpretation) Number() protoreflect.EnumNumber
func (AppendRowsRequest_MissingValueInterpretation) String
func (x AppendRowsRequest_MissingValueInterpretation) String() string
func (AppendRowsRequest_MissingValueInterpretation) Type
func (AppendRowsRequest_MissingValueInterpretation) Type() protoreflect.EnumType
AppendRowsRequest_ProtoData
type AppendRowsRequest_ProtoData struct {
// The protocol buffer schema used to serialize the data. Provide this value
// whenever:
//
// * You send the first request of an RPC connection.
//
// * You change the input schema.
//
// * You specify a new destination table.
WriterSchema *ProtoSchema `protobuf:"bytes,1,opt,name=writer_schema,json=writerSchema,proto3" json:"writer_schema,omitempty"`
// Serialized row data in protobuf message format.
// Currently, the backend expects the serialized rows to adhere to
// proto2 semantics when appending rows, particularly with respect to
// how default values are encoded.
Rows *ProtoRows `protobuf:"bytes,2,opt,name=rows,proto3" json:"rows,omitempty"`
// contains filtered or unexported fields
}
ProtoData contains the data rows and schema when constructing append requests.
func (*AppendRowsRequest_ProtoData) Descriptor
func (*AppendRowsRequest_ProtoData) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsRequest_ProtoData.ProtoReflect.Descriptor instead.
func (*AppendRowsRequest_ProtoData) GetRows
func (x *AppendRowsRequest_ProtoData) GetRows() *ProtoRows
func (*AppendRowsRequest_ProtoData) GetWriterSchema
func (x *AppendRowsRequest_ProtoData) GetWriterSchema() *ProtoSchema
func (*AppendRowsRequest_ProtoData) ProtoMessage
func (*AppendRowsRequest_ProtoData) ProtoMessage()
func (*AppendRowsRequest_ProtoData) ProtoReflect
func (x *AppendRowsRequest_ProtoData) ProtoReflect() protoreflect.Message
func (*AppendRowsRequest_ProtoData) Reset
func (x *AppendRowsRequest_ProtoData) Reset()
func (*AppendRowsRequest_ProtoData) String
func (x *AppendRowsRequest_ProtoData) String() string
AppendRowsRequest_ProtoRows
type AppendRowsRequest_ProtoRows struct {
// Rows in proto format.
ProtoRows *AppendRowsRequest_ProtoData `protobuf:"bytes,4,opt,name=proto_rows,json=protoRows,proto3,oneof"`
}
AppendRowsResponse
type AppendRowsResponse struct {
// Types that are assignable to Response:
//
// *AppendRowsResponse_AppendResult_
// *AppendRowsResponse_Error
Response isAppendRowsResponse_Response `protobuf_oneof:"response"`
	// If the backend detects a schema update, it is passed to the user so that
	// the user can input new types of messages. It will be empty when no schema
	// updates have occurred.
UpdatedSchema *TableSchema `protobuf:"bytes,3,opt,name=updated_schema,json=updatedSchema,proto3" json:"updated_schema,omitempty"`
// If a request failed due to corrupted rows, no rows in the batch will be
// appended. The API will return row level error info, so that the caller can
// remove the bad rows and retry the request.
RowErrors []*RowError `protobuf:"bytes,4,rep,name=row_errors,json=rowErrors,proto3" json:"row_errors,omitempty"`
// The target of the append operation. Matches the write_stream in the
// corresponding request.
WriteStream string `protobuf:"bytes,5,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
// contains filtered or unexported fields
}
Response message for AppendRows.
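One way to inspect a single response, sketched under the assumption that it was just received from an AppendRows stream:

import (
    "fmt"
    "log"

    storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// handleAppendResponse distinguishes the two response variants and surfaces
// the auxiliary fields.
func handleAppendResponse(resp *storagepb.AppendRowsResponse) error {
    if st := resp.GetError(); st != nil {
        // Row-level detail, if present, identifies rows to drop before retrying.
        for _, re := range resp.GetRowErrors() {
            log.Printf("row %d: %s", re.GetIndex(), re.GetMessage())
        }
        return fmt.Errorf("append failed: %s", st.GetMessage())
    }
    if result := resp.GetAppendResult(); result != nil && result.GetOffset() != nil {
        log.Printf("appended at offset %d", result.GetOffset().GetValue())
    }
    if schema := resp.GetUpdatedSchema(); schema != nil {
        log.Printf("destination schema now has %d fields", len(schema.GetFields()))
    }
    return nil
}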
func (*AppendRowsResponse) Descriptor
func (*AppendRowsResponse) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsResponse.ProtoReflect.Descriptor instead.
func (*AppendRowsResponse) GetAppendResult
func (x *AppendRowsResponse) GetAppendResult() *AppendRowsResponse_AppendResult
func (*AppendRowsResponse) GetError
func (x *AppendRowsResponse) GetError() *status.Status
func (*AppendRowsResponse) GetResponse
func (m *AppendRowsResponse) GetResponse() isAppendRowsResponse_Response
func (*AppendRowsResponse) GetRowErrors
func (x *AppendRowsResponse) GetRowErrors() []*RowError
func (*AppendRowsResponse) GetUpdatedSchema
func (x *AppendRowsResponse) GetUpdatedSchema() *TableSchema
func (*AppendRowsResponse) GetWriteStream
func (x *AppendRowsResponse) GetWriteStream() string
func (*AppendRowsResponse) ProtoMessage
func (*AppendRowsResponse) ProtoMessage()
func (*AppendRowsResponse) ProtoReflect
func (x *AppendRowsResponse) ProtoReflect() protoreflect.Message
func (*AppendRowsResponse) Reset
func (x *AppendRowsResponse) Reset()
func (*AppendRowsResponse) String
func (x *AppendRowsResponse) String() string
AppendRowsResponse_AppendResult
type AppendRowsResponse_AppendResult struct {
// The row offset at which the last append occurred. The offset will not be
// set if appending using default streams.
Offset *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=offset,proto3" json:"offset,omitempty"`
// contains filtered or unexported fields
}
AppendResult is returned for successful append requests.
func (*AppendRowsResponse_AppendResult) Descriptor
func (*AppendRowsResponse_AppendResult) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsResponse_AppendResult.ProtoReflect.Descriptor instead.
func (*AppendRowsResponse_AppendResult) GetOffset
func (x *AppendRowsResponse_AppendResult) GetOffset() *wrapperspb.Int64Value
func (*AppendRowsResponse_AppendResult) ProtoMessage
func (*AppendRowsResponse_AppendResult) ProtoMessage()
func (*AppendRowsResponse_AppendResult) ProtoReflect
func (x *AppendRowsResponse_AppendResult) ProtoReflect() protoreflect.Message
func (*AppendRowsResponse_AppendResult) Reset
func (x *AppendRowsResponse_AppendResult) Reset()
func (*AppendRowsResponse_AppendResult) String
func (x *AppendRowsResponse_AppendResult) String() string
AppendRowsResponse_AppendResult_
type AppendRowsResponse_AppendResult_ struct {
// Result if the append is successful.
AppendResult *AppendRowsResponse_AppendResult `protobuf:"bytes,1,opt,name=append_result,json=appendResult,proto3,oneof"`
}
AppendRowsResponse_Error
type AppendRowsResponse_Error struct {
// Error returned when problems were encountered. If present,
// it indicates rows were not accepted into the system.
// Users can retry or continue with other append requests within the
// same connection.
//
// Additional information about error signalling:
//
// ALREADY_EXISTS: Happens when an append specified an offset, and the
// backend already has received data at this offset. Typically encountered
// in retry scenarios, and can be ignored.
//
// OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
// the current end of the stream.
//
// INVALID_ARGUMENT: Indicates a malformed request or data.
//
// ABORTED: Request processing is aborted because of prior failures. The
// request can be retried if previous failure is addressed.
//
// INTERNAL: Indicates server side error(s) that can be retried.
Error *status.Status `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
}
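The documented codes suggest a retry policy along these lines; this is a sketch of one reasonable reading, not a prescribed policy:

import (
    statuspb "google.golang.org/genproto/googleapis/rpc/status"
    "google.golang.org/grpc/codes"
)

// retryable classifies an embedded append error per the comments above.
func retryable(st *statuspb.Status) bool {
    switch codes.Code(st.GetCode()) {
    case codes.AlreadyExists:
        return false // the offset was already written; usually ignorable on retry
    case codes.OutOfRange:
        return false // re-check the stream's end offset before appending again
    case codes.Aborted, codes.Internal:
        return true // retryable once any prior failure is addressed
    default:
        return false // e.g. INVALID_ARGUMENT: fix the request or data first
    }
}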
ArrowRecordBatch
type ArrowRecordBatch struct {
// IPC-serialized Arrow RecordBatch.
SerializedRecordBatch []byte `protobuf:"bytes,1,opt,name=serialized_record_batch,json=serializedRecordBatch,proto3" json:"serialized_record_batch,omitempty"`
// [Deprecated] The count of rows in `serialized_record_batch`.
// Please use the format-independent ReadRowsResponse.row_count instead.
//
// Deprecated: Marked as deprecated in google/cloud/bigquery/storage/v1/arrow.proto.
RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
// contains filtered or unexported fields
}
Arrow RecordBatch.
func (*ArrowRecordBatch) Descriptor
func (*ArrowRecordBatch) Descriptor() ([]byte, []int)
Deprecated: Use ArrowRecordBatch.ProtoReflect.Descriptor instead.
func (*ArrowRecordBatch) GetRowCount
func (x *ArrowRecordBatch) GetRowCount() int64
Deprecated: Marked as deprecated in google/cloud/bigquery/storage/v1/arrow.proto.
func (*ArrowRecordBatch) GetSerializedRecordBatch
func (x *ArrowRecordBatch) GetSerializedRecordBatch() []byte
func (*ArrowRecordBatch) ProtoMessage
func (*ArrowRecordBatch) ProtoMessage()
func (*ArrowRecordBatch) ProtoReflect
func (x *ArrowRecordBatch) ProtoReflect() protoreflect.Message
func (*ArrowRecordBatch) Reset
func (x *ArrowRecordBatch) Reset()
func (*ArrowRecordBatch) String
func (x *ArrowRecordBatch) String() string
ArrowSchema
type ArrowSchema struct {
// IPC serialized Arrow schema.
SerializedSchema []byte `protobuf:"bytes,1,opt,name=serialized_schema,json=serializedSchema,proto3" json:"serialized_schema,omitempty"`
// contains filtered or unexported fields
}
Arrow schema as specified in https://arrow.apache.org/docs/python/api/datatypes.html and serialized to bytes using IPC: https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc
See code samples on how this message can be deserialized.
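A sketch of one such deserialization using the Apache Arrow Go library; the module version in the import path is an assumption, and the IPC reader is fed the session's serialized schema followed by a batch's bytes:

import (
    "bytes"

    "github.com/apache/arrow/go/v15/arrow/ipc"

    storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// decodeBatch iterates the records in one serialized Arrow record batch.
func decodeBatch(schema *storagepb.ArrowSchema, batch *storagepb.ArrowRecordBatch) error {
    buf := bytes.NewBuffer(schema.GetSerializedSchema())
    buf.Write(batch.GetSerializedRecordBatch())

    rdr, err := ipc.NewReader(buf)
    if err != nil {
        return err
    }
    defer rdr.Release()

    for rdr.Next() {
        rec := rdr.Record()
        _ = rec.NumRows() // process the columnar data here
    }
    return rdr.Err()
}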
func (*ArrowSchema) Descriptor
func (*ArrowSchema) Descriptor() ([]byte, []int)
Deprecated: Use ArrowSchema.ProtoReflect.Descriptor instead.
func (*ArrowSchema) GetSerializedSchema
func (x *ArrowSchema) GetSerializedSchema() []byte
func (*ArrowSchema) ProtoMessage
func (*ArrowSchema) ProtoMessage()
func (*ArrowSchema) ProtoReflect
func (x *ArrowSchema) ProtoReflect() protoreflect.Message
func (*ArrowSchema) Reset
func (x *ArrowSchema) Reset()
func (*ArrowSchema) String
func (x *ArrowSchema) String() string
ArrowSerializationOptions
type ArrowSerializationOptions struct {
BufferCompression ArrowSerializationOptions_CompressionCodec "" /* 194 byte string literal not displayed */
}
Contains options specific to Arrow Serialization.
func (*ArrowSerializationOptions) Descriptor
func (*ArrowSerializationOptions) Descriptor() ([]byte, []int)
Deprecated: Use ArrowSerializationOptions.ProtoReflect.Descriptor instead.
func (*ArrowSerializationOptions) GetBufferCompression
func (x *ArrowSerializationOptions) GetBufferCompression() ArrowSerializationOptions_CompressionCodec
func (*ArrowSerializationOptions) ProtoMessage
func (*ArrowSerializationOptions) ProtoMessage()
func (*ArrowSerializationOptions) ProtoReflect
func (x *ArrowSerializationOptions) ProtoReflect() protoreflect.Message
func (*ArrowSerializationOptions) Reset
func (x *ArrowSerializationOptions) Reset()
func (*ArrowSerializationOptions) String
func (x *ArrowSerializationOptions) String() string
ArrowSerializationOptions_CompressionCodec
type ArrowSerializationOptions_CompressionCodec int32
Compression codecs supported by Arrow.
ArrowSerializationOptions_COMPRESSION_UNSPECIFIED, ArrowSerializationOptions_LZ4_FRAME, ArrowSerializationOptions_ZSTD
const (
// If unspecified, no compression will be used.
ArrowSerializationOptions_COMPRESSION_UNSPECIFIED ArrowSerializationOptions_CompressionCodec = 0
// LZ4 Frame (https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md)
ArrowSerializationOptions_LZ4_FRAME ArrowSerializationOptions_CompressionCodec = 1
// Zstandard compression.
ArrowSerializationOptions_ZSTD ArrowSerializationOptions_CompressionCodec = 2
)
func (ArrowSerializationOptions_CompressionCodec) Descriptor
func (ArrowSerializationOptions_CompressionCodec) Descriptor() protoreflect.EnumDescriptor
func (ArrowSerializationOptions_CompressionCodec) Enum
func (x ArrowSerializationOptions_CompressionCodec) Enum() *ArrowSerializationOptions_CompressionCodec
func (ArrowSerializationOptions_CompressionCodec) EnumDescriptor
func (ArrowSerializationOptions_CompressionCodec) EnumDescriptor() ([]byte, []int)
Deprecated: Use ArrowSerializationOptions_CompressionCodec.Descriptor instead.
func (ArrowSerializationOptions_CompressionCodec) Number
func (x ArrowSerializationOptions_CompressionCodec) Number() protoreflect.EnumNumber
func (ArrowSerializationOptions_CompressionCodec) String
func (x ArrowSerializationOptions_CompressionCodec) String() string
func (ArrowSerializationOptions_CompressionCodec) Type
func (ArrowSerializationOptions_CompressionCodec) Type() protoreflect.EnumType
AvroRows
type AvroRows struct {
// Binary serialized rows in a block.
SerializedBinaryRows []byte `protobuf:"bytes,1,opt,name=serialized_binary_rows,json=serializedBinaryRows,proto3" json:"serialized_binary_rows,omitempty"`
// [Deprecated] The count of rows in the returning block.
// Please use the format-independent ReadRowsResponse.row_count instead.
//
// Deprecated: Marked as deprecated in google/cloud/bigquery/storage/v1/avro.proto.
RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
// contains filtered or unexported fields
}
Avro rows.
func (*AvroRows) Descriptor
func (*AvroRows) Descriptor() ([]byte, []int)
Deprecated: Use AvroRows.ProtoReflect.Descriptor instead.
func (*AvroRows) GetRowCount
func (x *AvroRows) GetRowCount() int64
Deprecated: Marked as deprecated in google/cloud/bigquery/storage/v1/avro.proto.
func (*AvroRows) GetSerializedBinaryRows
func (x *AvroRows) GetSerializedBinaryRows() []byte
func (*AvroRows) ProtoMessage
func (*AvroRows) ProtoMessage()
func (*AvroRows) ProtoReflect
func (x *AvroRows) ProtoReflect() protoreflect.Message
func (*AvroRows) Reset
func (x *AvroRows) Reset()
func (*AvroRows) String
func (x *AvroRows) String() string
AvroSchema
type AvroSchema struct {
// JSON serialized schema, as described at
// https://avro.apache.org/docs/1.8.1/spec.html.
Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"`
// contains filtered or unexported fields
}
Avro schema.
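A sketch of decoding the rows of an AvroRows block against this schema, here using the third-party github.com/linkedin/goavro/v2 package (an assumption; any Avro decoder works):

import (
    "github.com/linkedin/goavro/v2"

    storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// decodeAvroBlock decodes the concatenated binary rows of one block.
func decodeAvroBlock(schema *storagepb.AvroSchema, rows *storagepb.AvroRows) ([]interface{}, error) {
    codec, err := goavro.NewCodec(schema.GetSchema())
    if err != nil {
        return nil, err
    }
    var out []interface{}
    data := rows.GetSerializedBinaryRows()
    for len(data) > 0 {
        native, rest, err := codec.NativeFromBinary(data)
        if err != nil {
            return nil, err
        }
        out = append(out, native)
        data = rest
    }
    return out, nil
}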
func (*AvroSchema) Descriptor
func (*AvroSchema) Descriptor() ([]byte, []int)
Deprecated: Use AvroSchema.ProtoReflect.Descriptor instead.
func (*AvroSchema) GetSchema
func (x *AvroSchema) GetSchema() string
func (*AvroSchema) ProtoMessage
func (*AvroSchema) ProtoMessage()
func (*AvroSchema) ProtoReflect
func (x *AvroSchema) ProtoReflect() protoreflect.Message
func (*AvroSchema) Reset
func (x *AvroSchema) Reset()
func (*AvroSchema) String
func (x *AvroSchema) String() string
AvroSerializationOptions
type AvroSerializationOptions struct {
EnableDisplayNameAttribute bool "" /* 144 byte string literal not displayed */
}
Contains options specific to Avro Serialization.
func (*AvroSerializationOptions) Descriptor
func (*AvroSerializationOptions) Descriptor() ([]byte, []int)
Deprecated: Use AvroSerializationOptions.ProtoReflect.Descriptor instead.
func (*AvroSerializationOptions) GetEnableDisplayNameAttribute
func (x *AvroSerializationOptions) GetEnableDisplayNameAttribute() bool
func (*AvroSerializationOptions) ProtoMessage
func (*AvroSerializationOptions) ProtoMessage()
func (*AvroSerializationOptions) ProtoReflect
func (x *AvroSerializationOptions) ProtoReflect() protoreflect.Message
func (*AvroSerializationOptions) Reset
func (x *AvroSerializationOptions) Reset()
func (*AvroSerializationOptions) String
func (x *AvroSerializationOptions) String() string
BatchCommitWriteStreamsRequest
type BatchCommitWriteStreamsRequest struct {
// Required. Parent table that all the streams should belong to, in the form
// of `projects/{project}/datasets/{dataset}/tables/{table}`.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. The group of streams that will be committed atomically.
WriteStreams []string `protobuf:"bytes,2,rep,name=write_streams,json=writeStreams,proto3" json:"write_streams,omitempty"`
// contains filtered or unexported fields
}
Request message for BatchCommitWriteStreams.
func (*BatchCommitWriteStreamsRequest) Descriptor
func (*BatchCommitWriteStreamsRequest) Descriptor() ([]byte, []int)
Deprecated: Use BatchCommitWriteStreamsRequest.ProtoReflect.Descriptor instead.
func (*BatchCommitWriteStreamsRequest) GetParent
func (x *BatchCommitWriteStreamsRequest) GetParent() string
func (*BatchCommitWriteStreamsRequest) GetWriteStreams
func (x *BatchCommitWriteStreamsRequest) GetWriteStreams() []string
func (*BatchCommitWriteStreamsRequest) ProtoMessage
func (*BatchCommitWriteStreamsRequest) ProtoMessage()
func (*BatchCommitWriteStreamsRequest) ProtoReflect
func (x *BatchCommitWriteStreamsRequest) ProtoReflect() protoreflect.Message
func (*BatchCommitWriteStreamsRequest) Reset
func (x *BatchCommitWriteStreamsRequest) Reset()
func (*BatchCommitWriteStreamsRequest) String
func (x *BatchCommitWriteStreamsRequest) String() string
BatchCommitWriteStreamsResponse
type BatchCommitWriteStreamsResponse struct {
// The time at which streams were committed, with microsecond granularity.
// This field will only exist when there are no stream errors.
// **Note** if this field is not set, it means the commit was not successful.
CommitTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=commit_time,json=commitTime,proto3" json:"commit_time,omitempty"`
// Stream-level error if the commit failed. Only streams with an error will be
// in the list.
// If empty, there is no error and all streams are committed successfully.
// If non-empty, certain streams have errors and no streams are committed due
// to the atomicity guarantee.
StreamErrors []*StorageError `protobuf:"bytes,2,rep,name=stream_errors,json=streamErrors,proto3" json:"stream_errors,omitempty"`
// contains filtered or unexported fields
}
Response message for BatchCommitWriteStreams.
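A sketch of consuming this response; the atomicity rule means a non-empty error list implies nothing was committed. client, table, and streamNames are placeholders supplied by the caller:

import (
    "context"
    "fmt"

    storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// commitStreams atomically commits finalized PENDING streams.
func commitStreams(ctx context.Context, client storagepb.BigQueryWriteClient, table string, streamNames []string) error {
    resp, err := client.BatchCommitWriteStreams(ctx, &storagepb.BatchCommitWriteStreamsRequest{
        Parent:       table,
        WriteStreams: streamNames,
    })
    if err != nil {
        return err
    }
    if len(resp.GetStreamErrors()) > 0 || resp.GetCommitTime() == nil {
        return fmt.Errorf("commit failed, no streams committed: %v", resp.GetStreamErrors())
    }
    return nil
}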
func (*BatchCommitWriteStreamsResponse) Descriptor
func (*BatchCommitWriteStreamsResponse) Descriptor() ([]byte, []int)
Deprecated: Use BatchCommitWriteStreamsResponse.ProtoReflect.Descriptor instead.
func (*BatchCommitWriteStreamsResponse) GetCommitTime
func (x *BatchCommitWriteStreamsResponse) GetCommitTime() *timestamppb.Timestamp
func (*BatchCommitWriteStreamsResponse) GetStreamErrors
func (x *BatchCommitWriteStreamsResponse) GetStreamErrors() []*StorageError
func (*BatchCommitWriteStreamsResponse) ProtoMessage
func (*BatchCommitWriteStreamsResponse) ProtoMessage()
func (*BatchCommitWriteStreamsResponse) ProtoReflect
func (x *BatchCommitWriteStreamsResponse) ProtoReflect() protoreflect.Message
func (*BatchCommitWriteStreamsResponse) Reset
func (x *BatchCommitWriteStreamsResponse) Reset()
func (*BatchCommitWriteStreamsResponse) String
func (x *BatchCommitWriteStreamsResponse) String() string
BigQueryReadClient
type BigQueryReadClient interface {
// Creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Data is assigned to each stream such that roughly the same number of
// rows can be read from each stream. Because the server-side unit for
// assigning data is collections of rows, the API does not guarantee that
// each stream will return the same number of rows. Additionally, the
// limits are enforced based on the number of pre-filtered rows, so some
// filters can lead to lopsided assignments.
//
// Read sessions automatically expire 6 hours after they are created and do
// not require manual clean-up by the caller.
CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error)
// Reads rows from the stream in the format prescribed by the ReadSession.
// Each response contains one or more table rows, up to a maximum of 100 MiB
// per response; read requests which attempt to read individual rows larger
// than 100 MiB will fail.
//
// Each request also returns a set of stream statistics reflecting the current
// state of the stream.
ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryRead_ReadRowsClient, error)
// Splits a given `ReadStream` into two `ReadStream` objects. These
// `ReadStream` objects are referred to as the primary and the residual
// streams of the split. The original `ReadStream` can still be read from in
// the same manner as before. Both of the returned `ReadStream` objects can
// also be read from, and the rows returned by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back-to-back in the
// original `ReadStream`. Concretely, it is guaranteed that for streams
// original, primary, and residual, that original[0-j] = primary[0-j] and
// original[j-n] = residual[0-m] once the streams have been read to
// completion.
SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error)
}
BigQueryReadClient is the client API for BigQueryRead service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewBigQueryReadClient
func NewBigQueryReadClient(cc grpc.ClientConnInterface) BigQueryReadClient
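A sketch of the end-to-end read flow over an already-authenticated gRPC connection; the project and table names are placeholders:

import (
    "context"
    "io"

    storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
    "google.golang.org/grpc"
)

// readTable creates a single-stream session and drains it.
func readTable(ctx context.Context, conn *grpc.ClientConn) error {
    client := storagepb.NewBigQueryReadClient(conn)

    session, err := client.CreateReadSession(ctx, &storagepb.CreateReadSessionRequest{
        Parent: "projects/my-project",
        ReadSession: &storagepb.ReadSession{
            Table:      "projects/my-project/datasets/my_dataset/tables/my_table",
            DataFormat: storagepb.DataFormat_AVRO,
        },
        MaxStreamCount: 1,
    })
    if err != nil {
        return err
    }
    if len(session.GetStreams()) == 0 {
        return nil // empty table: no streams are allocated
    }

    rows, err := client.ReadRows(ctx, &storagepb.ReadRowsRequest{
        ReadStream: session.GetStreams()[0].GetName(),
    })
    if err != nil {
        return err
    }
    for {
        resp, err := rows.Recv()
        if err == io.EOF {
            return nil // this stream (and, with one stream, the session) is fully read
        }
        if err != nil {
            return err
        }
        _ = resp.GetAvroRows() // decode against session.GetAvroSchema()
    }
}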
BigQueryReadServer
type BigQueryReadServer interface {
// Creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Data is assigned to each stream such that roughly the same number of
// rows can be read from each stream. Because the server-side unit for
// assigning data is collections of rows, the API does not guarantee that
// each stream will return the same number of rows. Additionally, the
// limits are enforced based on the number of pre-filtered rows, so some
// filters can lead to lopsided assignments.
//
// Read sessions automatically expire 6 hours after they are created and do
// not require manual clean-up by the caller.
CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
// Reads rows from the stream in the format prescribed by the ReadSession.
// Each response contains one or more table rows, up to a maximum of 100 MiB
// per response; read requests which attempt to read individual rows larger
// than 100 MiB will fail.
//
// Each request also returns a set of stream statistics reflecting the current
// state of the stream.
ReadRows(*ReadRowsRequest, BigQueryRead_ReadRowsServer) error
// Splits a given `ReadStream` into two `ReadStream` objects. These
// `ReadStream` objects are referred to as the primary and the residual
// streams of the split. The original `ReadStream` can still be read from in
// the same manner as before. Both of the returned `ReadStream` objects can
// also be read from, and the rows returned by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back-to-back in the
// original `ReadStream`. Concretely, it is guaranteed that for streams
// original, primary, and residual, that original[0-j] = primary[0-j] and
// original[j-n] = residual[0-m] once the streams have been read to
// completion.
SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
}
BigQueryReadServer is the server API for BigQueryRead service.
BigQueryRead_ReadRowsClient
type BigQueryRead_ReadRowsClient interface {
Recv() (*ReadRowsResponse, error)
grpc.ClientStream
}
BigQueryRead_ReadRowsServer
type BigQueryRead_ReadRowsServer interface {
Send(*ReadRowsResponse) error
grpc.ServerStream
}
BigQueryWriteClient
type BigQueryWriteClient interface {
// Creates a write stream to the given table.
// Additionally, every table has a special stream named '_default'
// to which data can be written. This stream doesn't need to be created using
// CreateWriteStream. It is a stream that can be used simultaneously by any
// number of clients. Data written to this stream is considered committed as
// soon as an acknowledgement is received.
CreateWriteStream(ctx context.Context, in *CreateWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error)
// Appends data to the given stream.
//
// If `offset` is specified, the `offset` is checked against the end of
// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
// attempt is made to append to an offset beyond the current end of the stream
// or `ALREADY_EXISTS` if user provides an `offset` that has already been
// written to. User can retry with adjusted offset within the same RPC
// connection. If `offset` is not specified, append happens at the end of the
// stream.
//
// The response contains an optional offset at which the append
// happened. No offset information will be returned for appends to a
// default stream.
//
// Responses are received in the same order in which requests are sent.
// There will be one response for each successfully inserted request. Responses
// may optionally embed error information if the originating AppendRequest was
// not successfully processed.
//
// The specifics of when successfully appended data is made visible to the
// table are governed by the type of stream:
//
// * For COMMITTED streams (which includes the default stream), data is
// visible immediately upon successful append.
//
// * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
// rpc which advances a cursor to a newer offset in the stream.
//
// * For PENDING streams, data is not made visible until the stream itself is
// finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
// committed via the `BatchCommitWriteStreams` rpc.
AppendRows(ctx context.Context, opts ...grpc.CallOption) (BigQueryWrite_AppendRowsClient, error)
// Gets information about a write stream.
GetWriteStream(ctx context.Context, in *GetWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error)
// Finalize a write stream so that no new data can be appended to the
// stream. Finalize is not supported on the '_default' stream.
FinalizeWriteStream(ctx context.Context, in *FinalizeWriteStreamRequest, opts ...grpc.CallOption) (*FinalizeWriteStreamResponse, error)
// Atomically commits a group of `PENDING` streams that belong to the same
// `parent` table.
//
// Streams must be finalized before commit and cannot be committed multiple
// times. Once a stream is committed, data in the stream becomes available
// for read operations.
BatchCommitWriteStreams(ctx context.Context, in *BatchCommitWriteStreamsRequest, opts ...grpc.CallOption) (*BatchCommitWriteStreamsResponse, error)
// Flushes rows to a BUFFERED stream.
//
// If users are appending rows to a BUFFERED stream, the flush operation is
// required in order for the rows to become available for reading. A
// Flush operation flushes up to any previously flushed offset in a BUFFERED
// stream, to the offset specified in the request.
//
// Flush is not supported on the _default stream, since it is not BUFFERED.
FlushRows(ctx context.Context, in *FlushRowsRequest, opts ...grpc.CallOption) (*FlushRowsResponse, error)
}
BigQueryWriteClient is the client API for BigQueryWrite service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewBigQueryWriteClient
func NewBigQueryWriteClient(cc grpc.ClientConnInterface) BigQueryWriteClient
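A sketch of the PENDING-stream lifecycle (create, append, finalize, commit) over an authenticated connection; table and req are placeholders:

import (
    "context"

    storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
    "google.golang.org/grpc"
)

// pendingWrite appends one batch whose visibility is deferred until commit.
func pendingWrite(ctx context.Context, conn *grpc.ClientConn, table string, req *storagepb.AppendRowsRequest) error {
    client := storagepb.NewBigQueryWriteClient(conn)

    ws, err := client.CreateWriteStream(ctx, &storagepb.CreateWriteStreamRequest{
        Parent:      table,
        WriteStream: &storagepb.WriteStream{Type: storagepb.WriteStream_PENDING},
    })
    if err != nil {
        return err
    }

    stream, err := client.AppendRows(ctx)
    if err != nil {
        return err
    }
    req.WriteStream = ws.GetName()
    if err := stream.Send(req); err != nil {
        return err
    }
    if _, err := stream.Recv(); err != nil { // one response per sent request
        return err
    }
    if err := stream.CloseSend(); err != nil {
        return err
    }

    if _, err := client.FinalizeWriteStream(ctx, &storagepb.FinalizeWriteStreamRequest{Name: ws.GetName()}); err != nil {
        return err
    }
    _, err = client.BatchCommitWriteStreams(ctx, &storagepb.BatchCommitWriteStreamsRequest{
        Parent:       table,
        WriteStreams: []string{ws.GetName()},
    })
    return err
}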
BigQueryWriteServer
type BigQueryWriteServer interface {
// Creates a write stream to the given table.
// Additionally, every table has a special stream named '_default'
// to which data can be written. This stream doesn't need to be created using
// CreateWriteStream. It is a stream that can be used simultaneously by any
// number of clients. Data written to this stream is considered committed as
// soon as an acknowledgement is received.
CreateWriteStream(context.Context, *CreateWriteStreamRequest) (*WriteStream, error)
// Appends data to the given stream.
//
// If `offset` is specified, the `offset` is checked against the end of
// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
// attempt is made to append to an offset beyond the current end of the stream
// or `ALREADY_EXISTS` if user provides an `offset` that has already been
// written to. User can retry with adjusted offset within the same RPC
// connection. If `offset` is not specified, append happens at the end of the
// stream.
//
// The response contains an optional offset at which the append
// happened. No offset information will be returned for appends to a
// default stream.
//
// Responses are received in the same order in which requests are sent.
// There will be one response for each successfully inserted request. Responses
// may optionally embed error information if the originating AppendRequest was
// not successfully processed.
//
// The specifics of when successfully appended data is made visible to the
// table are governed by the type of stream:
//
// * For COMMITTED streams (which includes the default stream), data is
// visible immediately upon successful append.
//
// * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
// rpc which advances a cursor to a newer offset in the stream.
//
// * For PENDING streams, data is not made visible until the stream itself is
// finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
// committed via the `BatchCommitWriteStreams` rpc.
AppendRows(BigQueryWrite_AppendRowsServer) error
// Gets information about a write stream.
GetWriteStream(context.Context, *GetWriteStreamRequest) (*WriteStream, error)
// Finalize a write stream so that no new data can be appended to the
// stream. Finalize is not supported on the '_default' stream.
FinalizeWriteStream(context.Context, *FinalizeWriteStreamRequest) (*FinalizeWriteStreamResponse, error)
// Atomically commits a group of `PENDING` streams that belong to the same
// `parent` table.
//
// Streams must be finalized before commit and cannot be committed multiple
// times. Once a stream is committed, data in the stream becomes available
// for read operations.
BatchCommitWriteStreams(context.Context, *BatchCommitWriteStreamsRequest) (*BatchCommitWriteStreamsResponse, error)
// Flushes rows to a BUFFERED stream.
//
// If users are appending rows to a BUFFERED stream, the flush operation is
// required in order for the rows to become available for reading. A
// Flush operation flushes up to any previously flushed offset in a BUFFERED
// stream, to the offset specified in the request.
//
// Flush is not supported on the _default stream, since it is not BUFFERED.
FlushRows(context.Context, *FlushRowsRequest) (*FlushRowsResponse, error)
}
BigQueryWriteServer is the server API for BigQueryWrite service.
BigQueryWrite_AppendRowsClient
type BigQueryWrite_AppendRowsClient interface {
Send(*AppendRowsRequest) error
Recv() (*AppendRowsResponse, error)
grpc.ClientStream
}
BigQueryWrite_AppendRowsServer
type BigQueryWrite_AppendRowsServer interface {
Send(*AppendRowsResponse) error
Recv() (*AppendRowsRequest, error)
grpc.ServerStream
}
CreateReadSessionRequest
type CreateReadSessionRequest struct {
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
ReadSession *ReadSession `protobuf:"bytes,2,opt,name=read_session,json=readSession,proto3" json:"read_session,omitempty"`
MaxStreamCount int32 `protobuf:"varint,3,opt,name=max_stream_count,json=maxStreamCount,proto3" json:"max_stream_count,omitempty"`
PreferredMinStreamCount int32 "" /* 135 byte string literal not displayed */
}
Request message for CreateReadSession.
func (*CreateReadSessionRequest) Descriptor
func (*CreateReadSessionRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateReadSessionRequest.ProtoReflect.Descriptor instead.
func (*CreateReadSessionRequest) GetMaxStreamCount
func (x *CreateReadSessionRequest) GetMaxStreamCount() int32
func (*CreateReadSessionRequest) GetParent
func (x *CreateReadSessionRequest) GetParent() string
func (*CreateReadSessionRequest) GetPreferredMinStreamCount
func (x *CreateReadSessionRequest) GetPreferredMinStreamCount() int32
func (*CreateReadSessionRequest) GetReadSession
func (x *CreateReadSessionRequest) GetReadSession() *ReadSession
func (*CreateReadSessionRequest) ProtoMessage
func (*CreateReadSessionRequest) ProtoMessage()
func (*CreateReadSessionRequest) ProtoReflect
func (x *CreateReadSessionRequest) ProtoReflect() protoreflect.Message
func (*CreateReadSessionRequest) Reset
func (x *CreateReadSessionRequest) Reset()
func (*CreateReadSessionRequest) String
func (x *CreateReadSessionRequest) String() string
CreateWriteStreamRequest
type CreateWriteStreamRequest struct {
// Required. Reference to the table to which the stream belongs, in the format
// of `projects/{project}/datasets/{dataset}/tables/{table}`.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. Stream to be created.
WriteStream *WriteStream `protobuf:"bytes,2,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
// contains filtered or unexported fields
}
Request message for CreateWriteStream.
func (*CreateWriteStreamRequest) Descriptor
func (*CreateWriteStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateWriteStreamRequest.ProtoReflect.Descriptor instead.
func (*CreateWriteStreamRequest) GetParent
func (x *CreateWriteStreamRequest) GetParent() string
func (*CreateWriteStreamRequest) GetWriteStream
func (x *CreateWriteStreamRequest) GetWriteStream() *WriteStream
func (*CreateWriteStreamRequest) ProtoMessage
func (*CreateWriteStreamRequest) ProtoMessage()
func (*CreateWriteStreamRequest) ProtoReflect
func (x *CreateWriteStreamRequest) ProtoReflect() protoreflect.Message
func (*CreateWriteStreamRequest) Reset
func (x *CreateWriteStreamRequest) Reset()
func (*CreateWriteStreamRequest) String
func (x *CreateWriteStreamRequest) String() string
DataFormat
type DataFormat int32
Data format for input or output data.
DataFormat_DATA_FORMAT_UNSPECIFIED, DataFormat_AVRO, DataFormat_ARROW
const (
// Data format is unspecified.
DataFormat_DATA_FORMAT_UNSPECIFIED DataFormat = 0
// Avro is a standard open source row-based file format.
// See https://avro.apache.org/ for more details.
DataFormat_AVRO DataFormat = 1
// Arrow is a standard open source column-based message format.
// See https://arrow.apache.org/ for more details.
DataFormat_ARROW DataFormat = 2
)
func (DataFormat) Descriptor
func (DataFormat) Descriptor() protoreflect.EnumDescriptor
func (DataFormat) Enum
func (x DataFormat) Enum() *DataFormat
func (DataFormat) EnumDescriptor
func (DataFormat) EnumDescriptor() ([]byte, []int)
Deprecated: Use DataFormat.Descriptor instead.
func (DataFormat) Number
func (x DataFormat) Number() protoreflect.EnumNumber
func (DataFormat) String
func (x DataFormat) String() string
func (DataFormat) Type
func (DataFormat) Type() protoreflect.EnumType
FinalizeWriteStreamRequest
type FinalizeWriteStreamRequest struct {
// Required. Name of the stream to finalize, in the form of
// `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
Request message for invoking FinalizeWriteStream.
func (*FinalizeWriteStreamRequest) Descriptor
func (*FinalizeWriteStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use FinalizeWriteStreamRequest.ProtoReflect.Descriptor instead.
func (*FinalizeWriteStreamRequest) GetName
func (x *FinalizeWriteStreamRequest) GetName() string
func (*FinalizeWriteStreamRequest) ProtoMessage
func (*FinalizeWriteStreamRequest) ProtoMessage()
func (*FinalizeWriteStreamRequest) ProtoReflect
func (x *FinalizeWriteStreamRequest) ProtoReflect() protoreflect.Message
func (*FinalizeWriteStreamRequest) Reset
func (x *FinalizeWriteStreamRequest) Reset()
func (*FinalizeWriteStreamRequest) String
func (x *FinalizeWriteStreamRequest) String() string
FinalizeWriteStreamResponse
type FinalizeWriteStreamResponse struct {
// Number of rows in the finalized stream.
RowCount int64 `protobuf:"varint,1,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
// contains filtered or unexported fields
}
Response message for FinalizeWriteStream.
func (*FinalizeWriteStreamResponse) Descriptor
func (*FinalizeWriteStreamResponse) Descriptor() ([]byte, []int)
Deprecated: Use FinalizeWriteStreamResponse.ProtoReflect.Descriptor instead.
func (*FinalizeWriteStreamResponse) GetRowCount
func (x *FinalizeWriteStreamResponse) GetRowCount() int64
func (*FinalizeWriteStreamResponse) ProtoMessage
func (*FinalizeWriteStreamResponse) ProtoMessage()
func (*FinalizeWriteStreamResponse) ProtoReflect
func (x *FinalizeWriteStreamResponse) ProtoReflect() protoreflect.Message
func (*FinalizeWriteStreamResponse) Reset
func (x *FinalizeWriteStreamResponse) Reset()
func (*FinalizeWriteStreamResponse) String
func (x *FinalizeWriteStreamResponse) String() string
FlushRowsRequest
type FlushRowsRequest struct {
// Required. The stream that is the target of the flush operation.
WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
// Ending offset of the flush operation. Rows before this offset (including
// this offset) will be flushed.
Offset *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"`
// contains filtered or unexported fields
}
Request message for FlushRows.
func (*FlushRowsRequest) Descriptor
func (*FlushRowsRequest) Descriptor() ([]byte, []int)
Deprecated: Use FlushRowsRequest.ProtoReflect.Descriptor instead.
func (*FlushRowsRequest) GetOffset
func (x *FlushRowsRequest) GetOffset() *wrapperspb.Int64Value
func (*FlushRowsRequest) GetWriteStream
func (x *FlushRowsRequest) GetWriteStream() string
func (*FlushRowsRequest) ProtoMessage
func (*FlushRowsRequest) ProtoMessage()
func (*FlushRowsRequest) ProtoReflect
func (x *FlushRowsRequest) ProtoReflect() protoreflect.Message
func (*FlushRowsRequest) Reset
func (x *FlushRowsRequest) Reset()
func (*FlushRowsRequest) String
func (x *FlushRowsRequest) String() string
FlushRowsResponse
type FlushRowsResponse struct {
// The rows before this offset (including this offset) are flushed.
Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
// contains filtered or unexported fields
}
Response message for FlushRows.
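A sketch of advancing the flush cursor after appending to a BUFFERED stream; client, streamName, and lastOffset are placeholders:

import (
    "context"
    "log"

    storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

// flushBuffered makes rows up to and including lastOffset readable.
func flushBuffered(ctx context.Context, client storagepb.BigQueryWriteClient, streamName string, lastOffset int64) error {
    resp, err := client.FlushRows(ctx, &storagepb.FlushRowsRequest{
        WriteStream: streamName,
        Offset:      wrapperspb.Int64(lastOffset),
    })
    if err != nil {
        return err
    }
    log.Printf("rows up to offset %d are now visible", resp.GetOffset())
    return nil
}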
func (*FlushRowsResponse) Descriptor
func (*FlushRowsResponse) Descriptor() ([]byte, []int)
Deprecated: Use FlushRowsResponse.ProtoReflect.Descriptor instead.
func (*FlushRowsResponse) GetOffset
func (x *FlushRowsResponse) GetOffset() int64
func (*FlushRowsResponse) ProtoMessage
func (*FlushRowsResponse) ProtoMessage()
func (*FlushRowsResponse) ProtoReflect
func (x *FlushRowsResponse) ProtoReflect() protoreflect.Message
func (*FlushRowsResponse) Reset
func (x *FlushRowsResponse) Reset()
func (*FlushRowsResponse) String
func (x *FlushRowsResponse) String() string
GetWriteStreamRequest
type GetWriteStreamRequest struct {
// Required. Name of the stream to get, in the form of
// `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Indicates whether to get the full or partial view of the WriteStream. If
// not set, the basic view is returned.
View WriteStreamView `protobuf:"varint,3,opt,name=view,proto3,enum=google.cloud.bigquery.storage.v1.WriteStreamView" json:"view,omitempty"`
// contains filtered or unexported fields
}
Request message for GetWriteStream.
func (*GetWriteStreamRequest) Descriptor
func (*GetWriteStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetWriteStreamRequest.ProtoReflect.Descriptor instead.
func (*GetWriteStreamRequest) GetName
func (x *GetWriteStreamRequest) GetName() string
func (*GetWriteStreamRequest) GetView
func (x *GetWriteStreamRequest) GetView() WriteStreamView
func (*GetWriteStreamRequest) ProtoMessage
func (*GetWriteStreamRequest) ProtoMessage()
func (*GetWriteStreamRequest) ProtoReflect
func (x *GetWriteStreamRequest) ProtoReflect() protoreflect.Message
func (*GetWriteStreamRequest) Reset
func (x *GetWriteStreamRequest) Reset()
func (*GetWriteStreamRequest) String
func (x *GetWriteStreamRequest) String() string
ProtoRows
type ProtoRows struct {
// A sequence of rows serialized as a Protocol Buffer.
//
// See https://developers.google.com/protocol-buffers/docs/overview for more
// information on deserializing this field.
SerializedRows [][]byte `protobuf:"bytes,1,rep,name=serialized_rows,json=serializedRows,proto3" json:"serialized_rows,omitempty"`
// contains filtered or unexported fields
}
func (*ProtoRows) Descriptor
func (*ProtoRows) Descriptor() ([]byte, []int)
Deprecated: Use ProtoRows.ProtoReflect.Descriptor instead.
func (*ProtoRows) GetSerializedRows
func (x *ProtoRows) GetSerializedRows() [][]byte
func (*ProtoRows) ProtoMessage
func (*ProtoRows) ProtoMessage()
func (*ProtoRows) ProtoReflect
func (x *ProtoRows) ProtoReflect() protoreflect.Message
func (*ProtoRows) Reset
func (x *ProtoRows) Reset()
func (*ProtoRows) String
func (x *ProtoRows) String() string
ProtoSchema
type ProtoSchema struct {
// Descriptor for input message. The provided descriptor must be
// self-contained, such that data rows sent can be fully decoded using only the
// single descriptor. For data rows that are compositions of multiple
// independent messages, this means the descriptor may need to be transformed
// to only use nested types:
// https://developers.google.com/protocol-buffers/docs/proto#nested
//
// For additional information for how proto types and values map onto BigQuery
// see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
ProtoDescriptor *descriptorpb.DescriptorProto `protobuf:"bytes,1,opt,name=proto_descriptor,json=protoDescriptor,proto3" json:"proto_descriptor,omitempty"`
// contains filtered or unexported fields
}
ProtoSchema describes the schema of the serialized protocol buffer data rows.
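A sketch of deriving the descriptor from a generated message type; mypb.Row is a hypothetical row message. Note that protodesc.ToDescriptorProto does not inline referenced top-level message types, so composite messages must first be normalized to nested types (for example with the managedwriter/adapt package's NormalizeDescriptor helper):

import (
    storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
    "google.golang.org/protobuf/reflect/protodesc"
)

// schemaForRow builds a self-contained ProtoSchema for a simple message.
func schemaForRow() *storagepb.ProtoSchema {
    return &storagepb.ProtoSchema{
        // mypb.Row is a placeholder for your own generated row message.
        ProtoDescriptor: protodesc.ToDescriptorProto((&mypb.Row{}).ProtoReflect().Descriptor()),
    }
}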
func (*ProtoSchema) Descriptor
func (*ProtoSchema) Descriptor() ([]byte, []int)
Deprecated: Use ProtoSchema.ProtoReflect.Descriptor instead.
func (*ProtoSchema) GetProtoDescriptor
func (x *ProtoSchema) GetProtoDescriptor() *descriptorpb.DescriptorProto
func (*ProtoSchema) ProtoMessage
func (*ProtoSchema) ProtoMessage()
func (*ProtoSchema) ProtoReflect
func (x *ProtoSchema) ProtoReflect() protoreflect.Message
func (*ProtoSchema) Reset
func (x *ProtoSchema) Reset()
func (*ProtoSchema) String
func (x *ProtoSchema) String() string
ReadRowsRequest
type ReadRowsRequest struct {
// Required. Stream to read rows from.
ReadStream string `protobuf:"bytes,1,opt,name=read_stream,json=readStream,proto3" json:"read_stream,omitempty"`
// The offset requested must be less than the last row read from Read.
// Requesting a larger offset is undefined. If not specified, start reading
// from offset zero.
Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
// contains filtered or unexported fields
}
Request message for ReadRows.
func (*ReadRowsRequest) Descriptor
func (*ReadRowsRequest) Descriptor() ([]byte, []int)
Deprecated: Use ReadRowsRequest.ProtoReflect.Descriptor instead.
func (*ReadRowsRequest) GetOffset
func (x *ReadRowsRequest) GetOffset() int64
func (*ReadRowsRequest) GetReadStream
func (x *ReadRowsRequest) GetReadStream() string
func (*ReadRowsRequest) ProtoMessage
func (*ReadRowsRequest) ProtoMessage()
func (*ReadRowsRequest) ProtoReflect
func (x *ReadRowsRequest) ProtoReflect() protoreflect.Message
func (*ReadRowsRequest) Reset
func (x *ReadRowsRequest) Reset()
func (*ReadRowsRequest) String
func (x *ReadRowsRequest) String() string
ReadRowsResponse
type ReadRowsResponse struct {
Rows isReadRowsResponse_Rows `protobuf_oneof:"rows"`
RowCount int64 `protobuf:"varint,6,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
Stats *StreamStats `protobuf:"bytes,2,opt,name=stats,proto3" json:"stats,omitempty"`
ThrottleState *ThrottleState `protobuf:"bytes,5,opt,name=throttle_state,json=throttleState,proto3" json:"throttle_state,omitempty"`
Schema isReadRowsResponse_Schema `protobuf_oneof:"schema"`
UncompressedByteSize *int64 "" /* 130 byte string literal not displayed */
}
Response from calling ReadRows may include row data, progress, and throttling information.
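Since row data arrives as a oneof, consumers typically branch on which representation is populated. A sketch, assuming the session requested Arrow or Avro output:
// handleResponse inspects one streamed response. Getters return nil when the
// corresponding oneof case is not set.
func handleResponse(resp *storagepb.ReadRowsResponse) {
    switch {
    case resp.GetArrowRecordBatch() != nil:
        batch := resp.GetArrowRecordBatch()
        _ = batch // decode with an Arrow IPC reader
    case resp.GetAvroRows() != nil:
        rows := resp.GetAvroRows()
        _ = rows // decode with an Avro codec
    }
    if ts := resp.GetThrottleState(); ts != nil && ts.GetThrottlePercent() > 0 {
        // Connection is throttled; consider slowing down this reader.
    }
}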
func (*ReadRowsResponse) Descriptor
func (*ReadRowsResponse) Descriptor() ([]byte, []int)
Deprecated: Use ReadRowsResponse.ProtoReflect.Descriptor instead.
func (*ReadRowsResponse) GetArrowRecordBatch
func (x *ReadRowsResponse) GetArrowRecordBatch() *ArrowRecordBatch
func (*ReadRowsResponse) GetArrowSchema
func (x *ReadRowsResponse) GetArrowSchema() *ArrowSchema
func (*ReadRowsResponse) GetAvroRows
func (x *ReadRowsResponse) GetAvroRows() *AvroRows
func (*ReadRowsResponse) GetAvroSchema
func (x *ReadRowsResponse) GetAvroSchema() *AvroSchema
func (*ReadRowsResponse) GetRowCount
func (x *ReadRowsResponse) GetRowCount() int64
func (*ReadRowsResponse) GetRows
func (m *ReadRowsResponse) GetRows() isReadRowsResponse_Rows
func (*ReadRowsResponse) GetSchema
func (m *ReadRowsResponse) GetSchema() isReadRowsResponse_Schema
func (*ReadRowsResponse) GetStats
func (x *ReadRowsResponse) GetStats() *StreamStats
func (*ReadRowsResponse) GetThrottleState
func (x *ReadRowsResponse) GetThrottleState() *ThrottleState
func (*ReadRowsResponse) GetUncompressedByteSize
func (x *ReadRowsResponse) GetUncompressedByteSize() int64
func (*ReadRowsResponse) ProtoMessage
func (*ReadRowsResponse) ProtoMessage()
func (*ReadRowsResponse) ProtoReflect
func (x *ReadRowsResponse) ProtoReflect() protoreflect.Message
func (*ReadRowsResponse) Reset
func (x *ReadRowsResponse) Reset()
func (*ReadRowsResponse) String
func (x *ReadRowsResponse) String() string
ReadRowsResponse_ArrowRecordBatch
type ReadRowsResponse_ArrowRecordBatch struct {
// Serialized row data in Arrow RecordBatch format.
ArrowRecordBatch *ArrowRecordBatch `protobuf:"bytes,4,opt,name=arrow_record_batch,json=arrowRecordBatch,proto3,oneof"`
}
ReadRowsResponse_ArrowSchema
type ReadRowsResponse_ArrowSchema struct {
// Output only. Arrow schema.
ArrowSchema *ArrowSchema `protobuf:"bytes,8,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"`
}
ReadRowsResponse_AvroRows
type ReadRowsResponse_AvroRows struct {
// Serialized row data in AVRO format.
AvroRows *AvroRows `protobuf:"bytes,3,opt,name=avro_rows,json=avroRows,proto3,oneof"`
}
ReadRowsResponse_AvroSchema
type ReadRowsResponse_AvroSchema struct {
// Output only. Avro schema.
AvroSchema *AvroSchema `protobuf:"bytes,7,opt,name=avro_schema,json=avroSchema,proto3,oneof"`
}
ReadSession
type ReadSession struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
DataFormat DataFormat "" /* 141 byte string literal not displayed */
Schema isReadSession_Schema `protobuf_oneof:"schema"`
Table string `protobuf:"bytes,6,opt,name=table,proto3" json:"table,omitempty"`
TableModifiers *ReadSession_TableModifiers `protobuf:"bytes,7,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"`
ReadOptions *ReadSession_TableReadOptions `protobuf:"bytes,8,opt,name=read_options,json=readOptions,proto3" json:"read_options,omitempty"`
Streams []*ReadStream `protobuf:"bytes,10,rep,name=streams,proto3" json:"streams,omitempty"`
EstimatedTotalBytesScanned int64 "" /* 145 byte string literal not displayed */
EstimatedTotalPhysicalFileSize int64 "" /* 159 byte string literal not displayed */
EstimatedRowCount int64 `protobuf:"varint,14,opt,name=estimated_row_count,json=estimatedRowCount,proto3" json:"estimated_row_count,omitempty"`
TraceId string `protobuf:"bytes,13,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
}
Information about the ReadSession.
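Once a session is created, its streams can be fanned out to parallel readers. A brief sketch, where session and launchReader are hypothetical:
// session is a hypothetical *storagepb.ReadSession returned by
// CreateReadSession. Each stream name can feed its own ReadRowsRequest,
// enabling parallel readers.
for _, s := range session.GetStreams() {
    launchReader(s.GetName()) // launchReader is a hypothetical worker
}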
func (*ReadSession) Descriptor
func (*ReadSession) Descriptor() ([]byte, []int)
Deprecated: Use ReadSession.ProtoReflect.Descriptor instead.
func (*ReadSession) GetArrowSchema
func (x *ReadSession) GetArrowSchema() *ArrowSchema
func (*ReadSession) GetAvroSchema
func (x *ReadSession) GetAvroSchema() *AvroSchema
func (*ReadSession) GetDataFormat
func (x *ReadSession) GetDataFormat() DataFormat
func (*ReadSession) GetEstimatedRowCount
func (x *ReadSession) GetEstimatedRowCount() int64
func (*ReadSession) GetEstimatedTotalBytesScanned
func (x *ReadSession) GetEstimatedTotalBytesScanned() int64
func (*ReadSession) GetEstimatedTotalPhysicalFileSize
func (x *ReadSession) GetEstimatedTotalPhysicalFileSize() int64
func (*ReadSession) GetExpireTime
func (x *ReadSession) GetExpireTime() *timestamppb.Timestamp
func (*ReadSession) GetName
func (x *ReadSession) GetName() string
func (*ReadSession) GetReadOptions
func (x *ReadSession) GetReadOptions() *ReadSession_TableReadOptions
func (*ReadSession) GetSchema
func (m *ReadSession) GetSchema() isReadSession_Schema
func (*ReadSession) GetStreams
func (x *ReadSession) GetStreams() []*ReadStream
func (*ReadSession) GetTable
func (x *ReadSession) GetTable() string
func (*ReadSession) GetTableModifiers
func (x *ReadSession) GetTableModifiers() *ReadSession_TableModifiers
func (*ReadSession) GetTraceId
func (x *ReadSession) GetTraceId() string
func (*ReadSession) ProtoMessage
func (*ReadSession) ProtoMessage()
func (*ReadSession) ProtoReflect
func (x *ReadSession) ProtoReflect() protoreflect.Message
func (*ReadSession) Reset
func (x *ReadSession) Reset()
func (*ReadSession) String
func (x *ReadSession) String() string
ReadSession_ArrowSchema
type ReadSession_ArrowSchema struct {
// Output only. Arrow schema.
ArrowSchema *ArrowSchema `protobuf:"bytes,5,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"`
}
ReadSession_AvroSchema
type ReadSession_AvroSchema struct {
// Output only. Avro schema.
AvroSchema *AvroSchema `protobuf:"bytes,4,opt,name=avro_schema,json=avroSchema,proto3,oneof"`
}
ReadSession_TableModifiers
type ReadSession_TableModifiers struct {
// The snapshot time of the table. If not set, interpreted as now.
SnapshotTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"`
// contains filtered or unexported fields
}
Additional attributes when reading a table.
func (*ReadSession_TableModifiers) Descriptor
func (*ReadSession_TableModifiers) Descriptor() ([]byte, []int)
Deprecated: Use ReadSession_TableModifiers.ProtoReflect.Descriptor instead.
func (*ReadSession_TableModifiers) GetSnapshotTime
func (x *ReadSession_TableModifiers) GetSnapshotTime() *timestamppb.Timestamp
func (*ReadSession_TableModifiers) ProtoMessage
func (*ReadSession_TableModifiers) ProtoMessage()
func (*ReadSession_TableModifiers) ProtoReflect
func (x *ReadSession_TableModifiers) ProtoReflect() protoreflect.Message
func (*ReadSession_TableModifiers) Reset
func (x *ReadSession_TableModifiers) Reset()
func (*ReadSession_TableModifiers) String
func (x *ReadSession_TableModifiers) String() string
ReadSession_TableReadOptions
type ReadSession_TableReadOptions struct {
SelectedFields []string `protobuf:"bytes,1,rep,name=selected_fields,json=selectedFields,proto3" json:"selected_fields,omitempty"`
RowRestriction string `protobuf:"bytes,2,opt,name=row_restriction,json=rowRestriction,proto3" json:"row_restriction,omitempty"`
OutputFormatSerializationOptions isReadSession_TableReadOptions_OutputFormatSerializationOptions `protobuf_oneof:"output_format_serialization_options"`
SamplePercentage *float64 `protobuf:"fixed64,5,opt,name=sample_percentage,json=samplePercentage,proto3,oneof" json:"sample_percentage,omitempty"`
ResponseCompressionCodec *ReadSession_TableReadOptions_ResponseCompressionCodec "" /* 234 byte string literal not displayed */
}
Options dictating how we read a table.
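A sketch of read options that project two columns and filter rows; the field names and predicate are hypothetical:
opts := &storagepb.ReadSession_TableReadOptions{
    // Read only these columns, in this order.
    SelectedFields: []string{"name", "age"},
    // SQL-like predicate evaluated server-side before rows are returned.
    RowRestriction: "age > 18",
}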
func (*ReadSession_TableReadOptions) Descriptor
func (*ReadSession_TableReadOptions) Descriptor() ([]byte, []int)
Deprecated: Use ReadSession_TableReadOptions.ProtoReflect.Descriptor instead.
func (*ReadSession_TableReadOptions) GetArrowSerializationOptions
func (x *ReadSession_TableReadOptions) GetArrowSerializationOptions() *ArrowSerializationOptions
func (*ReadSession_TableReadOptions) GetAvroSerializationOptions
func (x *ReadSession_TableReadOptions) GetAvroSerializationOptions() *AvroSerializationOptions
func (*ReadSession_TableReadOptions) GetOutputFormatSerializationOptions
func (m *ReadSession_TableReadOptions) GetOutputFormatSerializationOptions() isReadSession_TableReadOptions_OutputFormatSerializationOptions
func (*ReadSession_TableReadOptions) GetResponseCompressionCodec
func (x *ReadSession_TableReadOptions) GetResponseCompressionCodec() ReadSession_TableReadOptions_ResponseCompressionCodec
func (*ReadSession_TableReadOptions) GetRowRestriction
func (x *ReadSession_TableReadOptions) GetRowRestriction() string
func (*ReadSession_TableReadOptions) GetSamplePercentage
func (x *ReadSession_TableReadOptions) GetSamplePercentage() float64
func (*ReadSession_TableReadOptions) GetSelectedFields
func (x *ReadSession_TableReadOptions) GetSelectedFields() []string
func (*ReadSession_TableReadOptions) ProtoMessage
func (*ReadSession_TableReadOptions) ProtoMessage()
func (*ReadSession_TableReadOptions) ProtoReflect
func (x *ReadSession_TableReadOptions) ProtoReflect() protoreflect.Message
func (*ReadSession_TableReadOptions) Reset
func (x *ReadSession_TableReadOptions) Reset()
func (*ReadSession_TableReadOptions) String
func (x *ReadSession_TableReadOptions) String() string
ReadSession_TableReadOptions_ArrowSerializationOptions
type ReadSession_TableReadOptions_ArrowSerializationOptions struct {
// Optional. Options specific to the Apache Arrow output format.
ArrowSerializationOptions *ArrowSerializationOptions `protobuf:"bytes,3,opt,name=arrow_serialization_options,json=arrowSerializationOptions,proto3,oneof"`
}
ReadSession_TableReadOptions_AvroSerializationOptions
type ReadSession_TableReadOptions_AvroSerializationOptions struct {
// Optional. Options specific to the Apache Avro output format
AvroSerializationOptions *AvroSerializationOptions `protobuf:"bytes,4,opt,name=avro_serialization_options,json=avroSerializationOptions,proto3,oneof"`
}
ReadSession_TableReadOptions_ResponseCompressionCodec
type ReadSession_TableReadOptions_ResponseCompressionCodec int32
Specifies which compression codec to attempt on the entire serialized response payload (either Arrow record batch or Avro rows). This is not to be confused with the Apache Arrow native compression codecs specified in ArrowSerializationOptions. For performance reasons, when creating a read session that requests Arrow responses, setting both native Arrow compression and application-level response compression is not allowed; choose at most one kind of compression.
ReadSession_TableReadOptions_RESPONSE_COMPRESSION_CODEC_UNSPECIFIED, ReadSession_TableReadOptions_RESPONSE_COMPRESSION_CODEC_LZ4
const (
// Default is no compression.
ReadSession_TableReadOptions_RESPONSE_COMPRESSION_CODEC_UNSPECIFIED ReadSession_TableReadOptions_ResponseCompressionCodec = 0
// Use raw LZ4 compression.
ReadSession_TableReadOptions_RESPONSE_COMPRESSION_CODEC_LZ4 ReadSession_TableReadOptions_ResponseCompressionCodec = 2
)
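A sketch of opting into response-level LZ4 compression; per the note above, this should not be combined with native Arrow compression in ArrowSerializationOptions:
opts := &storagepb.ReadSession_TableReadOptions{
    // Enum() returns a pointer, as the field is optional in the proto.
    ResponseCompressionCodec: storagepb.ReadSession_TableReadOptions_RESPONSE_COMPRESSION_CODEC_LZ4.Enum(),
}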
func (ReadSession_TableReadOptions_ResponseCompressionCodec) Descriptor
func (ReadSession_TableReadOptions_ResponseCompressionCodec) Descriptor() protoreflect.EnumDescriptor
func (ReadSession_TableReadOptions_ResponseCompressionCodec) Enum
func (x ReadSession_TableReadOptions_ResponseCompressionCodec) Enum() *ReadSession_TableReadOptions_ResponseCompressionCodec
func (ReadSession_TableReadOptions_ResponseCompressionCodec) EnumDescriptor
func (ReadSession_TableReadOptions_ResponseCompressionCodec) EnumDescriptor() ([]byte, []int)
Deprecated: Use ReadSession_TableReadOptions_ResponseCompressionCodec.Descriptor instead.
func (ReadSession_TableReadOptions_ResponseCompressionCodec) Number
func (x ReadSession_TableReadOptions_ResponseCompressionCodec) Number() protoreflect.EnumNumber
func (ReadSession_TableReadOptions_ResponseCompressionCodec) String
func (x ReadSession_TableReadOptions_ResponseCompressionCodec) String() string
func (ReadSession_TableReadOptions_ResponseCompressionCodec) Type
func (ReadSession_TableReadOptions_ResponseCompressionCodec) Type() protoreflect.EnumType
ReadStream
type ReadStream struct {
// Output only. Name of the stream, in the form
// `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
Information about a single stream that gets data out of the storage system.
Most of the information about ReadStream instances is aggregated, making ReadStream lightweight.
func (*ReadStream) Descriptor
func (*ReadStream) Descriptor() ([]byte, []int)
Deprecated: Use ReadStream.ProtoReflect.Descriptor instead.
func (*ReadStream) GetName
func (x *ReadStream) GetName() string
func (*ReadStream) ProtoMessage
func (*ReadStream) ProtoMessage()
func (*ReadStream) ProtoReflect
func (x *ReadStream) ProtoReflect() protoreflect.Message
func (*ReadStream) Reset
func (x *ReadStream) Reset()
func (*ReadStream) String
func (x *ReadStream) String() string
RowError
type RowError struct {
// Index of the malformed row in the request.
Index int64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
// Structured error reason for a row error.
Code RowError_RowErrorCode `protobuf:"varint,2,opt,name=code,proto3,enum=google.cloud.bigquery.storage.v1.RowError_RowErrorCode" json:"code,omitempty"`
// Description of the issue encountered when processing the row.
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
// contains filtered or unexported fields
}
The message that presents row-level error info in a request.
func (*RowError) Descriptor
func (*RowError) Descriptor() ([]byte, []int)
Deprecated: Use RowError.ProtoReflect.Descriptor instead.
func (*RowError) GetCode
func (x *RowError) GetCode() RowError_RowErrorCode
func (*RowError) GetIndex
func (x *RowError) GetIndex() int64
func (*RowError) GetMessage
func (x *RowError) GetMessage() string
func (*RowError) ProtoMessage
func (*RowError) ProtoMessage()
func (*RowError) ProtoReflect
func (x *RowError) ProtoReflect() protoreflect.Message
func (*RowError) Reset
func (x *RowError) Reset()
func (*RowError) String
func (x *RowError) String() string
RowError_RowErrorCode
type RowError_RowErrorCode int32
Error code for RowError.
RowError_ROW_ERROR_CODE_UNSPECIFIED, RowError_FIELDS_ERROR
const (
// Default error.
RowError_ROW_ERROR_CODE_UNSPECIFIED RowError_RowErrorCode = 0
// One or more fields in the row has errors.
RowError_FIELDS_ERROR RowError_RowErrorCode = 1
)
func (RowError_RowErrorCode) Descriptor
func (RowError_RowErrorCode) Descriptor() protoreflect.EnumDescriptor
func (RowError_RowErrorCode) Enum
func (x RowError_RowErrorCode) Enum() *RowError_RowErrorCode
func (RowError_RowErrorCode) EnumDescriptor
func (RowError_RowErrorCode) EnumDescriptor() ([]byte, []int)
Deprecated: Use RowError_RowErrorCode.Descriptor instead.
func (RowError_RowErrorCode) Number
func (x RowError_RowErrorCode) Number() protoreflect.EnumNumber
func (RowError_RowErrorCode) String
func (x RowError_RowErrorCode) String() string
func (RowError_RowErrorCode) Type
func (RowError_RowErrorCode) Type() protoreflect.EnumType
SplitReadStreamRequest
type SplitReadStreamRequest struct {
// Required. Name of the stream to split.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// A value in the range (0.0, 1.0) that specifies the fractional point at
// which the original stream should be split. The actual split point is
// evaluated on pre-filtered rows, so if a filter is provided, then there is
// no guarantee that the division of the rows between the new child streams
// will be proportional to this fractional value. Additionally, because the
// server-side unit for assigning data is collections of rows, this fraction
// will always map to a data storage boundary on the server side.
Fraction float64 `protobuf:"fixed64,2,opt,name=fraction,proto3" json:"fraction,omitempty"`
// contains filtered or unexported fields
}
Request message for SplitReadStream.
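A sketch requesting an approximately even split; the stream name is hypothetical, and the realized split point lands on a server-side storage boundary as noted above:
req := &storagepb.SplitReadStreamRequest{
    Name:     "projects/p/locations/us/sessions/s/streams/st", // hypothetical
    Fraction: 0.5, // request an approximately even split
}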
func (*SplitReadStreamRequest) Descriptor
func (*SplitReadStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use SplitReadStreamRequest.ProtoReflect.Descriptor instead.
func (*SplitReadStreamRequest) GetFraction
func (x *SplitReadStreamRequest) GetFraction() float64
func (*SplitReadStreamRequest) GetName
func (x *SplitReadStreamRequest) GetName() string
func (*SplitReadStreamRequest) ProtoMessage
func (*SplitReadStreamRequest) ProtoMessage()
func (*SplitReadStreamRequest) ProtoReflect
func (x *SplitReadStreamRequest) ProtoReflect() protoreflect.Message
func (*SplitReadStreamRequest) Reset
func (x *SplitReadStreamRequest) Reset()
func (*SplitReadStreamRequest) String
func (x *SplitReadStreamRequest) String() string
SplitReadStreamResponse
type SplitReadStreamResponse struct {
// Primary stream, which contains the beginning portion of
// |original_stream|. An empty value indicates that the original stream can no
// longer be split.
PrimaryStream *ReadStream `protobuf:"bytes,1,opt,name=primary_stream,json=primaryStream,proto3" json:"primary_stream,omitempty"`
// Remainder stream, which contains the tail of |original_stream|. An empty
// value indicates that the original stream can no longer be split.
RemainderStream *ReadStream `protobuf:"bytes,2,opt,name=remainder_stream,json=remainderStream,proto3" json:"remainder_stream,omitempty"`
// contains filtered or unexported fields
}
Response message for SplitReadStream.
func (*SplitReadStreamResponse) Descriptor
func (*SplitReadStreamResponse) Descriptor() ([]byte, []int)
Deprecated: Use SplitReadStreamResponse.ProtoReflect.Descriptor instead.
func (*SplitReadStreamResponse) GetPrimaryStream
func (x *SplitReadStreamResponse) GetPrimaryStream() *ReadStream
func (*SplitReadStreamResponse) GetRemainderStream
func (x *SplitReadStreamResponse) GetRemainderStream() *ReadStream
func (*SplitReadStreamResponse) ProtoMessage
func (*SplitReadStreamResponse) ProtoMessage()
func (*SplitReadStreamResponse) ProtoReflect
func (x *SplitReadStreamResponse) ProtoReflect() protoreflect.Message
func (*SplitReadStreamResponse) Reset
func (x *SplitReadStreamResponse) Reset()
func (*SplitReadStreamResponse) String
func (x *SplitReadStreamResponse) String() string
StorageError
type StorageError struct {
Code StorageError_StorageErrorCode "" /* 130 byte string literal not displayed */
Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"`
ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
}
Structured custom BigQuery Storage error message. The error can be attached as error details in the returned rpc Status. In particular, the use of error codes allows more structured error handling, and reduces the need to evaluate unstructured error text strings.
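Because the error arrives as a detail on the returned RPC status, callers can recover it with the grpc status package. A sketch:
import (
    "google.golang.org/grpc/status"

    "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// storageErrorFrom extracts a StorageError detail from an RPC error, if any.
func storageErrorFrom(err error) (*storagepb.StorageError, bool) {
    st, ok := status.FromError(err)
    if !ok {
        return nil, false
    }
    for _, d := range st.Details() {
        if se, ok := d.(*storagepb.StorageError); ok {
            return se, true
        }
    }
    return nil, false
}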
func (*StorageError) Descriptor
func (*StorageError) Descriptor() ([]byte, []int)
Deprecated: Use StorageError.ProtoReflect.Descriptor instead.
func (*StorageError) GetCode
func (x *StorageError) GetCode() StorageError_StorageErrorCode
func (*StorageError) GetEntity
func (x *StorageError) GetEntity() string
func (*StorageError) GetErrorMessage
func (x *StorageError) GetErrorMessage() string
func (*StorageError) ProtoMessage
func (*StorageError) ProtoMessage()
func (*StorageError) ProtoReflect
func (x *StorageError) ProtoReflect() protoreflect.Message
func (*StorageError) Reset
func (x *StorageError) Reset()
func (*StorageError) String
func (x *StorageError) String() string
StorageError_StorageErrorCode
type StorageError_StorageErrorCode int32
Error code for StorageError.
StorageError_STORAGE_ERROR_CODE_UNSPECIFIED, StorageError_TABLE_NOT_FOUND, StorageError_STREAM_ALREADY_COMMITTED, StorageError_STREAM_NOT_FOUND, StorageError_INVALID_STREAM_TYPE, StorageError_INVALID_STREAM_STATE, StorageError_STREAM_FINALIZED, StorageError_SCHEMA_MISMATCH_EXTRA_FIELDS, StorageError_OFFSET_ALREADY_EXISTS, StorageError_OFFSET_OUT_OF_RANGE, StorageError_CMEK_NOT_PROVIDED, StorageError_INVALID_CMEK_PROVIDED, StorageError_CMEK_ENCRYPTION_ERROR, StorageError_KMS_SERVICE_ERROR, StorageError_KMS_PERMISSION_DENIED
const (
// Default error.
StorageError_STORAGE_ERROR_CODE_UNSPECIFIED StorageError_StorageErrorCode = 0
// Table is not found in the system.
StorageError_TABLE_NOT_FOUND StorageError_StorageErrorCode = 1
// Stream is already committed.
StorageError_STREAM_ALREADY_COMMITTED StorageError_StorageErrorCode = 2
// Stream is not found.
StorageError_STREAM_NOT_FOUND StorageError_StorageErrorCode = 3
// Invalid Stream type.
// For example, you try to commit a stream that is not pending.
StorageError_INVALID_STREAM_TYPE StorageError_StorageErrorCode = 4
// Invalid Stream state.
// For example, you try to commit a stream that is not finalized or has
// been garbage collected.
StorageError_INVALID_STREAM_STATE StorageError_StorageErrorCode = 5
// Stream is finalized.
StorageError_STREAM_FINALIZED StorageError_StorageErrorCode = 6
// There is a schema mismatch: the user schema has extra fields not
// present in the BigQuery schema.
StorageError_SCHEMA_MISMATCH_EXTRA_FIELDS StorageError_StorageErrorCode = 7
// Offset already exists.
StorageError_OFFSET_ALREADY_EXISTS StorageError_StorageErrorCode = 8
// Offset out of range.
StorageError_OFFSET_OUT_OF_RANGE StorageError_StorageErrorCode = 9
// Customer-managed encryption key (CMEK) not provided for CMEK-enabled
// data.
StorageError_CMEK_NOT_PROVIDED StorageError_StorageErrorCode = 10
// Customer-managed encryption key (CMEK) was incorrectly provided.
StorageError_INVALID_CMEK_PROVIDED StorageError_StorageErrorCode = 11
// There is an encryption error while using customer-managed encryption key.
StorageError_CMEK_ENCRYPTION_ERROR StorageError_StorageErrorCode = 12
// Key Management Service (KMS) service returned an error, which can be
// retried.
StorageError_KMS_SERVICE_ERROR StorageError_StorageErrorCode = 13
// Permission denied while using customer-managed encryption key.
StorageError_KMS_PERMISSION_DENIED StorageError_StorageErrorCode = 14
)
func (StorageError_StorageErrorCode) Descriptor
func (StorageError_StorageErrorCode) Descriptor() protoreflect.EnumDescriptor
func (StorageError_StorageErrorCode) Enum
func (x StorageError_StorageErrorCode) Enum() *StorageError_StorageErrorCode
func (StorageError_StorageErrorCode) EnumDescriptor
func (StorageError_StorageErrorCode) EnumDescriptor() ([]byte, []int)
Deprecated: Use StorageError_StorageErrorCode.Descriptor instead.
func (StorageError_StorageErrorCode) Number
func (x StorageError_StorageErrorCode) Number() protoreflect.EnumNumber
func (StorageError_StorageErrorCode) String
func (x StorageError_StorageErrorCode) String() string
func (StorageError_StorageErrorCode) Type
func (StorageError_StorageErrorCode) Type() protoreflect.EnumType
StreamStats
type StreamStats struct {
// Represents the progress of the current stream.
Progress *StreamStats_Progress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"`
// contains filtered or unexported fields
}
Estimated stream statistics for a given read Stream.
func (*StreamStats) Descriptor
func (*StreamStats) Descriptor() ([]byte, []int)
Deprecated: Use StreamStats.ProtoReflect.Descriptor instead.
func (*StreamStats) GetProgress
func (x *StreamStats) GetProgress() *StreamStats_Progress
func (*StreamStats) ProtoMessage
func (*StreamStats) ProtoMessage()
func (*StreamStats) ProtoReflect
func (x *StreamStats) ProtoReflect() protoreflect.Message
func (*StreamStats) Reset
func (x *StreamStats) Reset()
func (*StreamStats) String
func (x *StreamStats) String() string
StreamStats_Progress
type StreamStats_Progress struct {
// The fraction of rows assigned to the stream that have been processed by
// the server so far, not including the rows in the current response
// message.
//
// This value, along with `at_response_end`, can be used to interpolate
// the progress made as the rows in the message are being processed using
// the following formula: `at_response_start + (at_response_end -
// at_response_start) * rows_processed_from_response / rows_in_response`.
//
// Note that if a filter is provided, the `at_response_end` value of the
// previous response may not necessarily be equal to the
// `at_response_start` value of the current response.
AtResponseStart float64 `protobuf:"fixed64,1,opt,name=at_response_start,json=atResponseStart,proto3" json:"at_response_start,omitempty"`
// Similar to `at_response_start`, except that this value includes the
// rows in the current response.
AtResponseEnd float64 `protobuf:"fixed64,2,opt,name=at_response_end,json=atResponseEnd,proto3" json:"at_response_end,omitempty"`
// contains filtered or unexported fields
}
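A sketch applying the interpolation formula documented on AtResponseStart:
// interpolateProgress estimates overall progress while partway through a
// response, using the formula documented on AtResponseStart.
func interpolateProgress(p *storagepb.StreamStats_Progress, processed, total int64) float64 {
    start, end := p.GetAtResponseStart(), p.GetAtResponseEnd()
    return start + (end-start)*float64(processed)/float64(total)
}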
func (*StreamStats_Progress) Descriptor
func (*StreamStats_Progress) Descriptor() ([]byte, []int)
Deprecated: Use StreamStats_Progress.ProtoReflect.Descriptor instead.
func (*StreamStats_Progress) GetAtResponseEnd
func (x *StreamStats_Progress) GetAtResponseEnd() float64
func (*StreamStats_Progress) GetAtResponseStart
func (x *StreamStats_Progress) GetAtResponseStart() float64
func (*StreamStats_Progress) ProtoMessage
func (*StreamStats_Progress) ProtoMessage()
func (*StreamStats_Progress) ProtoReflect
func (x *StreamStats_Progress) ProtoReflect() protoreflect.Message
func (*StreamStats_Progress) Reset
func (x *StreamStats_Progress) Reset()
func (*StreamStats_Progress) String
func (x *StreamStats_Progress) String() string
TableFieldSchema
type TableFieldSchema struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Type TableFieldSchema_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.cloud.bigquery.storage.v1.TableFieldSchema_Type" json:"type,omitempty"`
Mode TableFieldSchema_Mode `protobuf:"varint,3,opt,name=mode,proto3,enum=google.cloud.bigquery.storage.v1.TableFieldSchema_Mode" json:"mode,omitempty"`
Fields []*TableFieldSchema `protobuf:"bytes,4,rep,name=fields,proto3" json:"fields,omitempty"`
Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
MaxLength int64 `protobuf:"varint,7,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
Precision int64 `protobuf:"varint,8,opt,name=precision,proto3" json:"precision,omitempty"`
Scale int64 `protobuf:"varint,9,opt,name=scale,proto3" json:"scale,omitempty"`
DefaultValueExpression string "" /* 130 byte string literal not displayed */
RangeElementType *TableFieldSchema_FieldElementType `protobuf:"bytes,11,opt,name=range_element_type,json=rangeElementType,proto3" json:"range_element_type,omitempty"`
}
TableFieldSchema defines a single field/column within a table schema.
func (*TableFieldSchema) Descriptor
func (*TableFieldSchema) Descriptor() ([]byte, []int)
Deprecated: Use TableFieldSchema.ProtoReflect.Descriptor instead.
func (*TableFieldSchema) GetDefaultValueExpression
func (x *TableFieldSchema) GetDefaultValueExpression() string
func (*TableFieldSchema) GetDescription
func (x *TableFieldSchema) GetDescription() string
func (*TableFieldSchema) GetFields
func (x *TableFieldSchema) GetFields() []*TableFieldSchema
func (*TableFieldSchema) GetMaxLength
func (x *TableFieldSchema) GetMaxLength() int64
func (*TableFieldSchema) GetMode
func (x *TableFieldSchema) GetMode() TableFieldSchema_Mode
func (*TableFieldSchema) GetName
func (x *TableFieldSchema) GetName() string
func (*TableFieldSchema) GetPrecision
func (x *TableFieldSchema) GetPrecision() int64
func (*TableFieldSchema) GetRangeElementType
func (x *TableFieldSchema) GetRangeElementType() *TableFieldSchema_FieldElementType
func (*TableFieldSchema) GetScale
func (x *TableFieldSchema) GetScale() int64
func (*TableFieldSchema) GetType
func (x *TableFieldSchema) GetType() TableFieldSchema_Type
func (*TableFieldSchema) ProtoMessage
func (*TableFieldSchema) ProtoMessage()
func (*TableFieldSchema) ProtoReflect
func (x *TableFieldSchema) ProtoReflect() protoreflect.Message
func (*TableFieldSchema) Reset
func (x *TableFieldSchema) Reset()
func (*TableFieldSchema) String
func (x *TableFieldSchema) String() string
TableFieldSchema_FieldElementType
type TableFieldSchema_FieldElementType struct {
// Required. The type of a field element.
Type TableFieldSchema_Type `protobuf:"varint,1,opt,name=type,proto3,enum=google.cloud.bigquery.storage.v1.TableFieldSchema_Type" json:"type,omitempty"`
// contains filtered or unexported fields
}
Represents the type of a field element.
func (*TableFieldSchema_FieldElementType) Descriptor
func (*TableFieldSchema_FieldElementType) Descriptor() ([]byte, []int)
Deprecated: Use TableFieldSchema_FieldElementType.ProtoReflect.Descriptor instead.
func (*TableFieldSchema_FieldElementType) GetType
func (x *TableFieldSchema_FieldElementType) GetType() TableFieldSchema_Type
func (*TableFieldSchema_FieldElementType) ProtoMessage
func (*TableFieldSchema_FieldElementType) ProtoMessage()
func (*TableFieldSchema_FieldElementType) ProtoReflect
func (x *TableFieldSchema_FieldElementType) ProtoReflect() protoreflect.Message
func (*TableFieldSchema_FieldElementType) Reset
func (x *TableFieldSchema_FieldElementType) Reset()
func (*TableFieldSchema_FieldElementType) String
func (x *TableFieldSchema_FieldElementType) String() string
TableFieldSchema_Mode
type TableFieldSchema_Mode int32
TableFieldSchema_MODE_UNSPECIFIED, TableFieldSchema_NULLABLE, TableFieldSchema_REQUIRED, TableFieldSchema_REPEATED
const (
// Illegal value
TableFieldSchema_MODE_UNSPECIFIED TableFieldSchema_Mode = 0
TableFieldSchema_NULLABLE TableFieldSchema_Mode = 1
TableFieldSchema_REQUIRED TableFieldSchema_Mode = 2
TableFieldSchema_REPEATED TableFieldSchema_Mode = 3
)
func (TableFieldSchema_Mode) Descriptor
func (TableFieldSchema_Mode) Descriptor() protoreflect.EnumDescriptor
func (TableFieldSchema_Mode) Enum
func (x TableFieldSchema_Mode) Enum() *TableFieldSchema_Mode
func (TableFieldSchema_Mode) EnumDescriptor
func (TableFieldSchema_Mode) EnumDescriptor() ([]byte, []int)
Deprecated: Use TableFieldSchema_Mode.Descriptor instead.
func (TableFieldSchema_Mode) Number
func (x TableFieldSchema_Mode) Number() protoreflect.EnumNumber
func (TableFieldSchema_Mode) String
func (x TableFieldSchema_Mode) String() string
func (TableFieldSchema_Mode) Type
func (TableFieldSchema_Mode) Type() protoreflect.EnumType
TableFieldSchema_Type
type TableFieldSchema_Type int32
TableFieldSchema_TYPE_UNSPECIFIED, TableFieldSchema_STRING, TableFieldSchema_INT64, TableFieldSchema_DOUBLE, TableFieldSchema_STRUCT, TableFieldSchema_BYTES, TableFieldSchema_BOOL, TableFieldSchema_TIMESTAMP, TableFieldSchema_DATE, TableFieldSchema_TIME, TableFieldSchema_DATETIME, TableFieldSchema_GEOGRAPHY, TableFieldSchema_NUMERIC, TableFieldSchema_BIGNUMERIC, TableFieldSchema_INTERVAL, TableFieldSchema_JSON, TableFieldSchema_RANGE
const (
// Illegal value
TableFieldSchema_TYPE_UNSPECIFIED TableFieldSchema_Type = 0
// 64K, UTF8
TableFieldSchema_STRING TableFieldSchema_Type = 1
// 64-bit signed
TableFieldSchema_INT64 TableFieldSchema_Type = 2
// 64-bit IEEE floating point
TableFieldSchema_DOUBLE TableFieldSchema_Type = 3
// Aggregate type
TableFieldSchema_STRUCT TableFieldSchema_Type = 4
// 64K, Binary
TableFieldSchema_BYTES TableFieldSchema_Type = 5
// 2-valued
TableFieldSchema_BOOL TableFieldSchema_Type = 6
// 64-bit signed usec since UTC epoch
TableFieldSchema_TIMESTAMP TableFieldSchema_Type = 7
// Civil date - Year, Month, Day
TableFieldSchema_DATE TableFieldSchema_Type = 8
// Civil time - Hour, Minute, Second, Microseconds
TableFieldSchema_TIME TableFieldSchema_Type = 9
// Combination of civil date and civil time
TableFieldSchema_DATETIME TableFieldSchema_Type = 10
// Geography object
TableFieldSchema_GEOGRAPHY TableFieldSchema_Type = 11
// Numeric value
TableFieldSchema_NUMERIC TableFieldSchema_Type = 12
// BigNumeric value
TableFieldSchema_BIGNUMERIC TableFieldSchema_Type = 13
// Interval
TableFieldSchema_INTERVAL TableFieldSchema_Type = 14
// JSON, String
TableFieldSchema_JSON TableFieldSchema_Type = 15
// RANGE
TableFieldSchema_RANGE TableFieldSchema_Type = 16
)
func (TableFieldSchema_Type) Descriptor
func (TableFieldSchema_Type) Descriptor() protoreflect.EnumDescriptor
func (TableFieldSchema_Type) Enum
func (x TableFieldSchema_Type) Enum() *TableFieldSchema_Type
func (TableFieldSchema_Type) EnumDescriptor
func (TableFieldSchema_Type) EnumDescriptor() ([]byte, []int)
Deprecated: Use TableFieldSchema_Type.Descriptor instead.
func (TableFieldSchema_Type) Number
func (x TableFieldSchema_Type) Number() protoreflect.EnumNumber
func (TableFieldSchema_Type) String
func (x TableFieldSchema_Type) String() string
func (TableFieldSchema_Type) Type
func (TableFieldSchema_Type) Type() protoreflect.EnumType
TableSchema
type TableSchema struct {
// Describes the fields in a table.
Fields []*TableFieldSchema `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"`
// contains filtered or unexported fields
}
Schema of a table. This schema is a subset of google.cloud.bigquery.v2.TableSchema containing the information necessary to generate a valid message to write to BigQuery.
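A sketch composing a TableSchema from nested TableFieldSchema values: a required STRING column plus a repeated STRUCT with one child field (all names hypothetical):
schema := &storagepb.TableSchema{
    Fields: []*storagepb.TableFieldSchema{
        {Name: "id", Type: storagepb.TableFieldSchema_STRING, Mode: storagepb.TableFieldSchema_REQUIRED},
        {
            Name: "tags",
            Type: storagepb.TableFieldSchema_STRUCT,
            Mode: storagepb.TableFieldSchema_REPEATED,
            Fields: []*storagepb.TableFieldSchema{
                {Name: "value", Type: storagepb.TableFieldSchema_STRING, Mode: storagepb.TableFieldSchema_NULLABLE},
            },
        },
    },
}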
func (*TableSchema) Descriptor
func (*TableSchema) Descriptor() ([]byte, []int)
Deprecated: Use TableSchema.ProtoReflect.Descriptor instead.
func (*TableSchema) GetFields
func (x *TableSchema) GetFields() []*TableFieldSchema
func (*TableSchema) ProtoMessage
func (*TableSchema) ProtoMessage()
func (*TableSchema) ProtoReflect
func (x *TableSchema) ProtoReflect() protoreflect.Message
func (*TableSchema) Reset
func (x *TableSchema) Reset()
func (*TableSchema) String
func (x *TableSchema) String() string
ThrottleState
type ThrottleState struct {
// How much this connection is being throttled. Zero means no throttling,
// 100 means fully throttled.
ThrottlePercent int32 `protobuf:"varint,1,opt,name=throttle_percent,json=throttlePercent,proto3" json:"throttle_percent,omitempty"`
// contains filtered or unexported fields
}
Information on whether the current connection is being throttled.
func (*ThrottleState) Descriptor
func (*ThrottleState) Descriptor() ([]byte, []int)
Deprecated: Use ThrottleState.ProtoReflect.Descriptor instead.
func (*ThrottleState) GetThrottlePercent
func (x *ThrottleState) GetThrottlePercent() int32
func (*ThrottleState) ProtoMessage
func (*ThrottleState) ProtoMessage()
func (*ThrottleState) ProtoReflect
func (x *ThrottleState) ProtoReflect() protoreflect.Message
func (*ThrottleState) Reset
func (x *ThrottleState) Reset()
func (*ThrottleState) String
func (x *ThrottleState) String() string
UnimplementedBigQueryReadServer
type UnimplementedBigQueryReadServer struct {
}
UnimplementedBigQueryReadServer can be embedded to have forward-compatible implementations.
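A sketch of the embedding pattern: methods you do not override fall through to the embedded type, so newly added service methods do not break the build:
import (
    "context"

    "cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// myReadServer overrides only CreateReadSession; the embedded
// UnimplementedBigQueryReadServer supplies stubs for everything else.
type myReadServer struct {
    storagepb.UnimplementedBigQueryReadServer
}

func (s *myReadServer) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest) (*storagepb.ReadSession, error) {
    // Hypothetical session name, for illustration only.
    return &storagepb.ReadSession{Name: "projects/p/locations/us/sessions/s"}, nil
}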
func (*UnimplementedBigQueryReadServer) CreateReadSession
func (*UnimplementedBigQueryReadServer) CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
func (*UnimplementedBigQueryReadServer) ReadRows
func (*UnimplementedBigQueryReadServer) ReadRows(*ReadRowsRequest, BigQueryRead_ReadRowsServer) error
func (*UnimplementedBigQueryReadServer) SplitReadStream
func (*UnimplementedBigQueryReadServer) SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
UnimplementedBigQueryWriteServer
type UnimplementedBigQueryWriteServer struct {
}
UnimplementedBigQueryWriteServer can be embedded to have forward-compatible implementations.
func (*UnimplementedBigQueryWriteServer) AppendRows
func (*UnimplementedBigQueryWriteServer) AppendRows(BigQueryWrite_AppendRowsServer) error
func (*UnimplementedBigQueryWriteServer) BatchCommitWriteStreams
func (*UnimplementedBigQueryWriteServer) BatchCommitWriteStreams(context.Context, *BatchCommitWriteStreamsRequest) (*BatchCommitWriteStreamsResponse, error)
func (*UnimplementedBigQueryWriteServer) CreateWriteStream
func (*UnimplementedBigQueryWriteServer) CreateWriteStream(context.Context, *CreateWriteStreamRequest) (*WriteStream, error)
func (*UnimplementedBigQueryWriteServer) FinalizeWriteStream
func (*UnimplementedBigQueryWriteServer) FinalizeWriteStream(context.Context, *FinalizeWriteStreamRequest) (*FinalizeWriteStreamResponse, error)
func (*UnimplementedBigQueryWriteServer) FlushRows
func (*UnimplementedBigQueryWriteServer) FlushRows(context.Context, *FlushRowsRequest) (*FlushRowsResponse, error)
func (*UnimplementedBigQueryWriteServer) GetWriteStream
func (*UnimplementedBigQueryWriteServer) GetWriteStream(context.Context, *GetWriteStreamRequest) (*WriteStream, error)
WriteStream
type WriteStream struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Type WriteStream_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.cloud.bigquery.storage.v1.WriteStream_Type" json:"type,omitempty"`
CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
CommitTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=commit_time,json=commitTime,proto3" json:"commit_time,omitempty"`
TableSchema *TableSchema `protobuf:"bytes,5,opt,name=table_schema,json=tableSchema,proto3" json:"table_schema,omitempty"`
WriteMode WriteStream_WriteMode "" /* 149 byte string literal not displayed */
Location string `protobuf:"bytes,8,opt,name=location,proto3" json:"location,omitempty"`
}
Information about a single stream that gets data inside the storage system.
func (*WriteStream) Descriptor
func (*WriteStream) Descriptor() ([]byte, []int)
Deprecated: Use WriteStream.ProtoReflect.Descriptor instead.
func (*WriteStream) GetCommitTime
func (x *WriteStream) GetCommitTime() *timestamppb.Timestamp
func (*WriteStream) GetCreateTime
func (x *WriteStream) GetCreateTime() *timestamppb.Timestamp
func (*WriteStream) GetLocation
func (x *WriteStream) GetLocation() string
func (*WriteStream) GetName
func (x *WriteStream) GetName() string
func (*WriteStream) GetTableSchema
func (x *WriteStream) GetTableSchema() *TableSchema
func (*WriteStream) GetType
func (x *WriteStream) GetType() WriteStream_Type
func (*WriteStream) GetWriteMode
func (x *WriteStream) GetWriteMode() WriteStream_WriteMode
func (*WriteStream) ProtoMessage
func (*WriteStream) ProtoMessage()
func (*WriteStream) ProtoReflect
func (x *WriteStream) ProtoReflect() protoreflect.Message
func (*WriteStream) Reset
func (x *WriteStream) Reset()
func (*WriteStream) String
func (x *WriteStream) String() string
WriteStreamView
type WriteStreamView int32
WriteStreamView is a view enum that controls what details about a write stream should be returned.
WriteStreamView_WRITE_STREAM_VIEW_UNSPECIFIED, WriteStreamView_BASIC, WriteStreamView_FULL
const (
// The default / unset value.
WriteStreamView_WRITE_STREAM_VIEW_UNSPECIFIED WriteStreamView = 0
// The BASIC projection returns basic metadata about a write stream. The
// basic view does not include schema information. This is the default view
// returned by GetWriteStream.
WriteStreamView_BASIC WriteStreamView = 1
// The FULL projection returns all available write stream metadata, including
// the schema. CreateWriteStream returns the full projection of write stream
// metadata.
WriteStreamView_FULL WriteStreamView = 2
)
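A sketch requesting the FULL view, assuming the Name and View fields of GetWriteStreamRequest (defined elsewhere in this package); the stream name is hypothetical:
req := &storagepb.GetWriteStreamRequest{
    Name: "projects/p/datasets/d/tables/t/streams/s", // hypothetical
    View: storagepb.WriteStreamView_FULL,             // include the table schema
}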
func (WriteStreamView) Descriptor
func (WriteStreamView) Descriptor() protoreflect.EnumDescriptor
func (WriteStreamView) Enum
func (x WriteStreamView) Enum() *WriteStreamView
func (WriteStreamView) EnumDescriptor
func (WriteStreamView) EnumDescriptor() ([]byte, []int)
Deprecated: Use WriteStreamView.Descriptor instead.
func (WriteStreamView) Number
func (x WriteStreamView) Number() protoreflect.EnumNumber
func (WriteStreamView) String
func (x WriteStreamView) String() string
func (WriteStreamView) Type
func (WriteStreamView) Type() protoreflect.EnumType
WriteStream_Type
type WriteStream_Type int32
Type enum of the stream.
WriteStream_TYPE_UNSPECIFIED, WriteStream_COMMITTED, WriteStream_PENDING, WriteStream_BUFFERED
const (
// Unknown type.
WriteStream_TYPE_UNSPECIFIED WriteStream_Type = 0
// Data will commit automatically and appear as soon as the write is
// acknowledged.
WriteStream_COMMITTED WriteStream_Type = 1
// Data is invisible until the stream is committed.
WriteStream_PENDING WriteStream_Type = 2
// Data is only visible up to the offset to which it was flushed.
WriteStream_BUFFERED WriteStream_Type = 3
)
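A sketch declaring a PENDING stream at creation time, assuming the Parent and WriteStream fields of CreateWriteStreamRequest (defined elsewhere in this package); resource names are hypothetical:
req := &storagepb.CreateWriteStreamRequest{
    Parent: "projects/p/datasets/d/tables/t", // hypothetical
    WriteStream: &storagepb.WriteStream{
        // Rows stay invisible until the stream is finalized and committed.
        Type: storagepb.WriteStream_PENDING,
    },
}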
func (WriteStream_Type) Descriptor
func (WriteStream_Type) Descriptor() protoreflect.EnumDescriptor
func (WriteStream_Type) Enum
func (x WriteStream_Type) Enum() *WriteStream_Type
func (WriteStream_Type) EnumDescriptor
func (WriteStream_Type) EnumDescriptor() ([]byte, []int)
Deprecated: Use WriteStream_Type.Descriptor instead.
func (WriteStream_Type) Number
func (x WriteStream_Type) Number() protoreflect.EnumNumber
func (WriteStream_Type) String
func (x WriteStream_Type) String() string
func (WriteStream_Type) Type
func (WriteStream_Type) Type() protoreflect.EnumType
WriteStream_WriteMode
type WriteStream_WriteMode int32
Mode enum of the stream.
WriteStream_WRITE_MODE_UNSPECIFIED, WriteStream_INSERT
const (
// Unknown type.
WriteStream_WRITE_MODE_UNSPECIFIED WriteStream_WriteMode = 0
// Insert new records into the table.
// It is the default value if customers do not specify it.
WriteStream_INSERT WriteStream_WriteMode = 1
)