Reference documentation and code samples for the Vertex AI V1 API class Google::Cloud::AIPlatform::V1::CountTokensRequest.
Request message for PredictionService.CountTokens.
Inherits
- Object
Extended By
- Google::Protobuf::MessageExts::ClassMethods
Includes
- Google::Protobuf::MessageExts
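The following is a minimal sketch of building a CountTokensRequest and sending it. It assumes the CountTokens RPC is exposed through the generated LlmUtilityService client in this gem version; the project, location, and endpoint IDs are placeholders.

```ruby
require "google/cloud/ai_platform/v1"

# Placeholder resource name in the documented Endpoint format.
endpoint = "projects/my-project/locations/us-central1/endpoints/my-endpoint"

request = Google::Cloud::AIPlatform::V1::CountTokensRequest.new(
  endpoint: endpoint,
  contents: [
    Google::Cloud::AIPlatform::V1::Content.new(
      role:  "user",
      parts: [Google::Cloud::AIPlatform::V1::Part.new(text: "How many tokens is this prompt?")]
    )
  ]
)

# Assumes CountTokens is served by the LLM utility client in this release.
client   = Google::Cloud::AIPlatform::V1::LlmUtilityService::Client.new
response = client.count_tokens request
puts response.total_tokens
```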
Methods
#contents
def contents() -> ::Array<::Google::Cloud::AIPlatform::V1::Content>
- (::Array<::Google::Cloud::AIPlatform::V1::Content>) — Optional. Input content.
#contents=
def contents=(value) -> ::Array<::Google::Cloud::AIPlatform::V1::Content>
- value (::Array<::Google::Cloud::AIPlatform::V1::Content>) — Optional. Input content.
- (::Array<::Google::Cloud::AIPlatform::V1::Content>) — Optional. Input content.
#endpoint
def endpoint() -> ::String
- (::String) — Required. The name of the Endpoint requested to perform token counting. Format: projects/{project}/locations/{location}/endpoints/{endpoint}
#endpoint=
def endpoint=(value) -> ::String
- value (::String) — Required. The name of the Endpoint requested to perform token counting. Format: projects/{project}/locations/{location}/endpoints/{endpoint}
- (::String) — Required. The name of the Endpoint requested to perform token counting. Format: projects/{project}/locations/{location}/endpoints/{endpoint}
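Continuing the sketch above, the endpoint string can be assembled in the documented format; the project, location, and endpoint IDs are placeholders.

```ruby
project_id  = "my-project"   # placeholder
location_id = "us-central1"  # placeholder
endpoint_id = "my-endpoint"  # placeholder

request.endpoint = "projects/#{project_id}/locations/#{location_id}/endpoints/#{endpoint_id}"
```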
#generation_config
def generation_config() -> ::Google::Cloud::AIPlatform::V1::GenerationConfig
- (::Google::Cloud::AIPlatform::V1::GenerationConfig) — Optional. Generation config that the model will use to generate the response.
#generation_config=
def generation_config=(value) -> ::Google::Cloud::AIPlatform::V1::GenerationConfig
- value (::Google::Cloud::AIPlatform::V1::GenerationConfig) — Optional. Generation config that the model will use to generate the response.
- (::Google::Cloud::AIPlatform::V1::GenerationConfig) — Optional. Generation config that the model will use to generate the response.
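Continuing the sketch above, a GenerationConfig can be attached so the count reflects the intended generation settings; the values are arbitrary examples.

```ruby
request.generation_config = Google::Cloud::AIPlatform::V1::GenerationConfig.new(
  temperature:       0.2,
  max_output_tokens: 256
)
```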
#instances
def instances() -> ::Array<::Google::Protobuf::Value>
- (::Array<::Google::Protobuf::Value>) — Optional. The instances that are the input to the token counting call. Schema is identical to the prediction schema of the underlying model.
#instances=
def instances=(value) -> ::Array<::Google::Protobuf::Value>
- value (::Array<::Google::Protobuf::Value>) — Optional. The instances that are the input to the token counting call. Schema is identical to the prediction schema of the underlying model.
- (::Array<::Google::Protobuf::Value>) — Optional. The instances that are the input to the token counting call. Schema is identical to the prediction schema of the underlying model.
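Continuing the sketch above, instances carry model-specific prediction payloads as Google::Protobuf::Value messages; the "prompt" field name is an assumed example schema, not part of this API.

```ruby
require "google/protobuf/struct_pb"

request.instances = [
  Google::Protobuf::Value.new(
    struct_value: Google::Protobuf::Struct.new(
      # "prompt" is a hypothetical field for a model whose prediction schema expects it.
      fields: { "prompt" => Google::Protobuf::Value.new(string_value: "Hello, world") }
    )
  )
]
```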
#model
def model() -> ::String
- (::String) — Optional. The name of the publisher model requested to serve the prediction. Format: projects/{project}/locations/{location}/publishers/*/models/*
#model=
def model=(value) -> ::String
- value (::String) — Optional. The name of the publisher model requested to serve the prediction. Format: projects/{project}/locations/{location}/publishers/*/models/*
- (::String) — Optional. The name of the publisher model requested to serve the prediction. Format: projects/{project}/locations/{location}/publishers/*/models/*
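Continuing the sketch above, model takes the documented publisher-model format; the publisher and model IDs below are placeholders.

```ruby
request.model = "projects/my-project/locations/us-central1/publishers/google/models/example-model"
```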
#system_instruction
def system_instruction() -> ::Google::Cloud::AIPlatform::V1::Content
- (::Google::Cloud::AIPlatform::V1::Content) — Optional. The user-provided system instructions for the model. Note: only text should be used in parts, and content in each part will be in a separate paragraph.
#system_instruction=
def system_instruction=(value) -> ::Google::Cloud::AIPlatform::V1::Content
- value (::Google::Cloud::AIPlatform::V1::Content) — Optional. The user-provided system instructions for the model. Note: only text should be used in parts, and content in each part will be in a separate paragraph.
- (::Google::Cloud::AIPlatform::V1::Content) — Optional. The user-provided system instructions for the model. Note: only text should be used in parts, and content in each part will be in a separate paragraph.
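Continuing the sketch above, system instructions are supplied as a text-only Content message per the note; the instruction text is illustrative.

```ruby
request.system_instruction = Google::Cloud::AIPlatform::V1::Content.new(
  parts: [Google::Cloud::AIPlatform::V1::Part.new(text: "You are a terse assistant.")]
)
```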
#tools
def tools() -> ::Array<::Google::Cloud::AIPlatform::V1::Tool>
- (::Array<::Google::Cloud::AIPlatform::V1::Tool>) — Optional. A list of Tools the model may use to generate the next response. A Tool is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of the knowledge and scope of the model.
#tools=
def tools=(value) -> ::Array<::Google::Cloud::AIPlatform::V1::Tool>
- value (::Array<::Google::Cloud::AIPlatform::V1::Tool>) — Optional. A list of Tools the model may use to generate the next response. A Tool is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of the knowledge and scope of the model.
- (::Array<::Google::Cloud::AIPlatform::V1::Tool>) — Optional. A list of Tools the model may use to generate the next response. A Tool is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of the knowledge and scope of the model.
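Continuing the sketch above, any Tools intended for the generation request can be attached to the count request; the function name and parameter schema below are hypothetical.

```ruby
request.tools = [
  Google::Cloud::AIPlatform::V1::Tool.new(
    function_declarations: [
      Google::Cloud::AIPlatform::V1::FunctionDeclaration.new(
        name:        "get_weather",  # hypothetical function
        description: "Look up the weather for a city.",
        parameters:  Google::Cloud::AIPlatform::V1::Schema.new(
          type:       :OBJECT,
          properties: { "city" => Google::Cloud::AIPlatform::V1::Schema.new(type: :STRING) },
          required:   ["city"]
        )
      )
    ]
  )
]
```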