ChatSession(
    model: vertexai.generative_models._generative_models._GenerativeModel,
    *,
    history: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Content]
    ] = None,
    response_validation: bool = True,
    responder: typing.Optional[
        vertexai.generative_models._generative_models.AutomaticFunctionCallingResponder
    ] = None,
    raise_on_blocked: typing.Optional[bool] = None
)
A ChatSession holds the chat history and sends it to the model together with each new message, so the conversation stays in context across turns.
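In practice a ChatSession is usually obtained from GenerativeModel.start_chat() rather than constructed directly. A minimal sketch, assuming placeholder values for the project ID, region, and model name:

```python
import vertexai
from vertexai.generative_models import Content, GenerativeModel, Part

# Placeholders: substitute your own project ID, region, and model name.
vertexai.init(project="my-project", location="us-central1")
model = GenerativeModel("gemini-1.5-pro")

# start_chat() returns a ChatSession; history and response_validation
# correspond to the constructor parameters shown above.
chat = model.start_chat(
    history=[
        Content(role="user", parts=[Part.from_text("Please answer tersely.")]),
        Content(role="model", parts=[Part.from_text("Understood.")]),
    ],
    response_validation=True,
)

response = chat.send_message("What is the capital of France?")
print(response.text)
print(len(chat.history))  # the new exchange has been appended to the history
```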
Methods
send_message
send_message(
    content: typing.Union[
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[vertexai.generative_models._generative_models.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    labels: typing.Optional[typing.Dict[str, str]] = None,
    stream: bool = False
) -> typing.Union[
    vertexai.generative_models._generative_models.GenerationResponse,
    typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse],
]
Generates content from the given message; the message and the model's reply are recorded in the chat history.
Exceptions

Type | Description
---|---
ResponseValidationError | If the response was blocked or is incomplete.
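A sketch of a single send_message call with a generation config, a safety-settings mapping, and the streaming variant. It reuses the chat object from the sketch above and assumes ResponseValidationError, HarmCategory, and HarmBlockThreshold are importable from vertexai.generative_models (names may vary by SDK version):

```python
from vertexai.generative_models import (
    GenerationConfig,
    HarmBlockThreshold,
    HarmCategory,
    ResponseValidationError,
)

try:
    response = chat.send_message(
        "Summarize your last answer in one sentence.",
        generation_config=GenerationConfig(temperature=0.2, max_output_tokens=256),
        safety_settings={
            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
        },
    )
    print(response.text)
except ResponseValidationError:
    # Raised when response_validation=True and the response is blocked or incomplete.
    print("Response was blocked or incomplete.")

# With stream=True the call returns an iterable of partial GenerationResponse chunks.
for chunk in chat.send_message("Now expand on that summary.", stream=True):
    print(chunk.text, end="")
```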
send_message_async
send_message_async(
    content: typing.Union[
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[vertexai.generative_models._generative_models.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    labels: typing.Optional[typing.Dict[str, str]] = None,
    stream: bool = False
) -> typing.Union[
    typing.Awaitable[vertexai.generative_models._generative_models.GenerationResponse],
    typing.Awaitable[
        typing.AsyncIterable[
            vertexai.generative_models._generative_models.GenerationResponse
        ]
    ],
]
Asynchronously generates content from the given message; the message and the model's reply are recorded in the chat history.
Exceptions

Type | Description
---|---
ResponseValidationError | If the response was blocked or is incomplete.
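A sketch of the async variant, assuming vertexai.init() has already been called as in the first sketch and that the model name is a placeholder. With stream=True, awaiting the call yields an async iterable of partial responses:

```python
import asyncio

from vertexai.generative_models import GenerativeModel


async def main() -> None:
    chat = GenerativeModel("gemini-1.5-pro").start_chat()

    # Non-streaming: awaiting yields a single GenerationResponse.
    response = await chat.send_message_async("Name three uses of asyncio.")
    print(response.text)

    # Streaming: awaiting yields an async iterable of partial responses.
    stream = await chat.send_message_async("Elaborate on the first one.", stream=True)
    async for chunk in stream:
        print(chunk.text, end="")


asyncio.run(main())
```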