Class AsyncEvals (1.122.0)

AsyncEvals(api_client_: google.genai._api_client.BaseApiClient)

API documentation for AsyncEvals class.

Methods

batch_evaluate

batch_evaluate(
    *,
    dataset: typing.Union[
        vertexai._genai.types.EvaluationDataset,
        vertexai._genai.types.EvaluationDatasetDict,
    ],
    metrics: list[
        typing.Union[vertexai._genai.types.Metric, vertexai._genai.types.MetricDict]
    ],
    dest: str,
    config: typing.Optional[
        typing.Union[
            vertexai._genai.types.EvaluateDatasetConfig,
            vertexai._genai.types.EvaluateDatasetConfigDict,
        ]
    ] = None
) -> vertexai._genai.types.EvaluateDatasetOperation

Evaluates a dataset based on a set of given metrics.

create_evaluation_item

create_evaluation_item(
    *,
    evaluation_item_type: vertexai._genai.types.EvaluationItemType,
    gcs_uri: str,
    display_name: typing.Optional[str] = None,
    config: typing.Optional[
        typing.Union[
            vertexai._genai.types.CreateEvaluationItemConfig,
            vertexai._genai.types.CreateEvaluationItemConfigDict,
        ]
    ] = None
) -> vertexai._genai.types.EvaluationItem

Creates an EvaluationItem.

create_evaluation_run

create_evaluation_run(
    *,
    name: str,
    dataset: typing.Union[
        vertexai._genai.types.EvaluationRunDataSource,
        vertexai._genai.types.EvaluationDataset,
    ],
    dest: str,
    display_name: typing.Optional[str] = None,
    metrics: typing.Optional[
        list[
            typing.Union[
                vertexai._genai.types.EvaluationRunMetric,
                vertexai._genai.types.EvaluationRunMetricDict,
            ]
        ]
    ] = None,
    agent_info: typing.Optional[vertexai._genai.types.AgentInfo] = None,
    labels: typing.Optional[dict[str, str]] = None,
    config: typing.Optional[
        typing.Union[
            vertexai._genai.types.CreateEvaluationRunConfig,
            vertexai._genai.types.CreateEvaluationRunConfigDict,
        ]
    ] = None
) -> vertexai._genai.types.EvaluationRun

Creates an EvaluationRun.

create_evaluation_set

create_evaluation_set(
    *,
    evaluation_items: list[str],
    display_name: typing.Optional[str] = None,
    config: typing.Optional[
        typing.Union[
            vertexai._genai.types.CreateEvaluationSetConfig,
            vertexai._genai.types.CreateEvaluationSetConfigDict,
        ]
    ] = None
) -> vertexai._genai.types.EvaluationSet

Creates an EvaluationSet.

evaluate_instances

evaluate_instances(
    *, metric_config: vertexai._genai.types._EvaluateInstancesRequestParameters
) -> vertexai._genai.types.EvaluateInstancesResponse

Evaluates one or more instances of a model based on the given metric configuration.

get_evaluation_item

get_evaluation_item(
    *,
    name: str,
    config: typing.Optional[
        typing.Union[
            vertexai._genai.types.GetEvaluationItemConfig,
            vertexai._genai.types.GetEvaluationItemConfigDict,
        ]
    ] = None
) -> vertexai._genai.types.EvaluationItem

Retrieves an EvaluationItem from the resource name.

get_evaluation_run

get_evaluation_run(
    *,
    name: str,
    include_evaluation_items: bool = False,
    config: typing.Optional[
        typing.Union[
            vertexai._genai.types.GetEvaluationRunConfig,
            vertexai._genai.types.GetEvaluationRunConfigDict,
        ]
    ] = None
) -> vertexai._genai.types.EvaluationRun

Retrieves an EvaluationRun from the resource name.

get_evaluation_set

get_evaluation_set(
    *,
    name: str,
    config: typing.Optional[
        typing.Union[
            vertexai._genai.types.GetEvaluationSetConfig,
            vertexai._genai.types.GetEvaluationSetConfigDict,
        ]
    ] = None
) -> vertexai._genai.types.EvaluationSet

Retrieves an EvaluationSet from the resource name.