GenerativeModel(
    model_name: str,
    *,
    generation_config: typing.Optional[GenerationConfigType] = None,
    safety_settings: typing.Optional[SafetySettingsType] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    tool_config: typing.Optional[
        vertexai.generative_models._generative_models.ToolConfig
    ] = None,
    system_instruction: typing.Optional[PartsType] = None,
    labels: typing.Optional[typing.Dict[str, str]] = None
)

Initializes GenerativeModel.
Usage:
```
model = GenerativeModel("gemini-pro")
print(model.generate_content("Hello"))
```
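The optional keyword arguments can also be passed at construction time. A minimal sketch (the model name, temperature, and system instruction are illustrative placeholders):
```
from vertexai.generative_models import GenerationConfig, GenerativeModel

model = GenerativeModel(
    "gemini-pro",  # illustrative model name
    generation_config=GenerationConfig(temperature=0.2, max_output_tokens=256),
    system_instruction="You are a concise assistant.",  # illustrative instruction
)
print(model.generate_content("Hello").text)
```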
Methods
compute_tokens
compute_tokens(
    contents: ContentsType,
) -> google.cloud.aiplatform_v1beta1.types.llm_utility_service.ComputeTokensResponse

Computes tokens.
Returns

| Type | Description |
|---|---|
| A ComputeTokensResponse object | tokens_info: A list of token info entries from the input. The input contents (ContentsType) can contain multiple string instances, and each tokens_info item corresponds to one string instance. Each entry consists of a tokens list, a token_ids list, and a role. |
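Example (a minimal sketch; the input strings are illustrative, and `model` is assumed to be a GenerativeModel constructed as in the usage example above):
```
response = model.compute_tokens(["Hello world", "Goodbye world"])
for info in response.tokens_info:
    # Each tokens_info entry corresponds to one input string.
    print(info.role, info.tokens, info.token_ids)
```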
compute_tokens_async
compute_tokens_async(
    contents: ContentsType,
) -> google.cloud.aiplatform_v1beta1.types.llm_utility_service.ComputeTokensResponse

Computes tokens asynchronously.
Returns

| Type | Description |
|---|---|
| An awaitable that resolves to a ComputeTokensResponse object | tokens_info: A list of token info entries from the input. The input contents (ContentsType) can contain multiple string instances, and each tokens_info item corresponds to one string instance. Each entry consists of a tokens list, a token_ids list, and a role. |
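Example (a minimal async sketch under the same assumptions as above):
```
import asyncio

async def main():
    response = await model.compute_tokens_async(["Hello world"])
    for info in response.tokens_info:
        print(info.tokens)

asyncio.run(main())
```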
count_tokens
count_tokens(
    contents: ContentsType,
    *,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None
) -> google.cloud.aiplatform_v1beta1.types.prediction_service.CountTokensResponse

Counts tokens.
Returns

| Type | Description |
|---|---|
| A CountTokensResponse object | total_tokens: The total number of tokens counted across all instances from the request. total_billable_characters: The total number of billable characters counted across all instances from the request. |
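Example (a minimal sketch; the prompt is illustrative):
```
response = model.count_tokens("Why is the sky blue?")
print(response.total_tokens)
print(response.total_billable_characters)
```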
count_tokens_async
count_tokens_async(
    contents: ContentsType,
    *,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None
) -> google.cloud.aiplatform_v1beta1.types.prediction_service.CountTokensResponse

Counts tokens asynchronously.
Returns

| Type | Description |
|---|---|
| An awaitable that resolves to a CountTokensResponse object | total_tokens: The total number of tokens counted across all instances from the request. total_billable_characters: The total number of billable characters counted across all instances from the request. |
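Example (a minimal async sketch; the prompt is illustrative):
```
import asyncio

async def main():
    response = await model.count_tokens_async("Why is the sky blue?")
    print(response.total_tokens)

asyncio.run(main())
```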
from_cached_content
from_cached_content(
    cached_content: typing.Union[str, CachedContent],
    *,
    generation_config: typing.Optional[GenerationConfigType] = None,
    safety_settings: typing.Optional[SafetySettingsType] = None
) -> _GenerativeModel

Creates a model from cached content.
Creates a model instance backed by existing cached content. The cached content becomes the prefix of the request contents.
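Example (a minimal sketch; the cached-content resource name is a hypothetical placeholder, and per the signature a CachedContent object may be passed instead of a string):
```
# The cached-content resource name below is hypothetical.
model = GenerativeModel.from_cached_content(
    cached_content="projects/my-project/locations/us-central1/cachedContents/123",
)
response = model.generate_content("Summarize the cached document.")
print(response.text)
```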
generate_content
generate_content(
    contents: ContentsType,
    *,
    generation_config: typing.Optional[GenerationConfigType] = None,
    safety_settings: typing.Optional[SafetySettingsType] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    tool_config: typing.Optional[
        vertexai.generative_models._generative_models.ToolConfig
    ] = None,
    labels: typing.Optional[typing.Dict[str, str]] = None,
    stream: bool = False
) -> typing.Union[
    vertexai.generative_models._generative_models.GenerationResponse,
    typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse],
]

Generates content.
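Example (a minimal sketch showing both return shapes; the prompts are illustrative):
```
# Non-streaming: a single GenerationResponse.
response = model.generate_content("Tell me a short joke.")
print(response.text)

# Streaming: an iterable of partial GenerationResponse chunks.
for chunk in model.generate_content("Tell me a story.", stream=True):
    print(chunk.text, end="")
```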
generate_content_async
generate_content_async(
    contents: ContentsType,
    *,
    generation_config: typing.Optional[GenerationConfigType] = None,
    safety_settings: typing.Optional[SafetySettingsType] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    tool_config: typing.Optional[
        vertexai.generative_models._generative_models.ToolConfig
    ] = None,
    labels: typing.Optional[typing.Dict[str, str]] = None,
    stream: bool = False
) -> typing.Union[
    vertexai.generative_models._generative_models.GenerationResponse,
    typing.AsyncIterable[
        vertexai.generative_models._generative_models.GenerationResponse
    ],
]

Generates content asynchronously.
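Example (a minimal async sketch showing both return shapes; with stream=True the awaitable resolves to an async iterable):
```
import asyncio

async def main():
    # Non-streaming: await a single response.
    response = await model.generate_content_async("Hello")
    print(response.text)

    # Streaming: await the stream, then iterate asynchronously.
    stream = await model.generate_content_async("Tell me a story.", stream=True)
    async for chunk in stream:
        print(chunk.text, end="")

asyncio.run(main())
```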
set_request_response_logging_config
set_request_response_logging_config(
    *,
    enabled: bool,
    sampling_rate: float,
    bigquery_destination: str,
    enable_otel_logging: typing.Optional[bool] = None
) -> typing.Union[
    google.cloud.aiplatform_v1beta1.types.endpoint.PublisherModelConfig,
    google.cloud.aiplatform_v1beta1.types.endpoint.Endpoint,
]

Sets the request/response logging config.
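Example (a minimal sketch; the sampling rate and BigQuery destination are illustrative placeholders):
```
# The BigQuery destination below is a hypothetical placeholder.
model.set_request_response_logging_config(
    enabled=True,
    sampling_rate=1.0,
    bigquery_destination="bq://my-project.my_dataset",
)
```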
start_chat
start_chat(
    *,
    history: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Content]
    ] = None,
    response_validation: bool = True,
    responder: typing.Optional[
        vertexai.generative_models._generative_models.AutomaticFunctionCallingResponder
    ] = None
) -> vertexai.generative_models._generative_models.ChatSession

Creates a stateful chat session.
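Example (a minimal sketch; the messages are illustrative):
```
chat = model.start_chat()
response = chat.send_message("Hello, who are you?")
print(response.text)

# The session keeps history, so follow-ups can reference earlier turns.
response = chat.send_message("Repeat what I just asked.")
print(response.text)
```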