This legacy version of AutoML Tables is deprecated and will no longer be available on Google Cloud after January 23, 2024. All the functionality of legacy AutoML Tables and new features are available on the Vertex AI platform. See Migrate to Vertex AI to learn how to migrate your resources.

Display a model evaluation

Demonstrates how to retrieve and display a model evaluation.

Code sample

Python

To authenticate to AutoML Tables, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
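
If you want to confirm that Application Default Credentials resolve before running the sample, a minimal sketch using the google-auth library (installed alongside the AutoML client library) might look like this:

# Check that Application Default Credentials can be loaded.
# google.auth.default() raises DefaultCredentialsError if ADC is not set up.
import google.auth

credentials, detected_project = google.auth.default()
print(f"Credentials loaded for project: {detected_project}")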

# TODO(developer): Uncomment and set the following variables
# project_id = 'PROJECT_ID_HERE'
# compute_region = 'COMPUTE_REGION_HERE'
# model_display_name = 'MODEL_DISPLAY_NAME_HERE'
# filter = 'filter expression here'

from google.cloud import automl_v1beta1 as automl

client = automl.TablesClient(project=project_id, region=compute_region)

# List all model evaluations for the model, applying the optional filter.
response = client.list_model_evaluations(
    model_display_name=model_display_name, filter=filter
)

# Iterate through the results.
for evaluation in response:
    # There is an evaluation for each class in the model and one for the
    # overall model. Keep only the overall model evaluation, which has an
    # empty annotation_spec_id.
    if not evaluation.annotation_spec_id:
        model_evaluation_name = evaluation.name
        break

# Get a model evaluation.
model_evaluation = client.get_model_evaluation(
    model_evaluation_name=model_evaluation_name
)

classification_metrics = model_evaluation.classification_evaluation_metrics
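# str() of an unset protobuf message is an empty string, so this checks
# whether the evaluation contains classification metrics.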
if str(classification_metrics):
    confidence_metrics = classification_metrics.confidence_metrics_entry

    # Show precision, recall, and F1 score at a confidence threshold of 0.5.
    print("Model classification metrics (threshold at 0.5):")
    for confidence_metrics_entry in confidence_metrics:
        if confidence_metrics_entry.confidence_threshold == 0.5:
            print(
                f"Model Precision: {round(confidence_metrics_entry.precision * 100, 2)}%"
            )
            print(
                f"Model Recall: {round(confidence_metrics_entry.recall * 100, 2)}%"
            )
            print(
                f"Model F1 score: {round(confidence_metrics_entry.f1_score * 100, 2)}%"
            )
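    # AUPRC is the area under the precision-recall curve and AUROC the area
    # under the ROC curve; log loss penalizes confident misclassifications.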
    print(f"Model AUPRC: {classification_metrics.au_prc}")
    print(f"Model AUROC: {classification_metrics.au_roc}")
    print(f"Model log loss: {classification_metrics.log_loss}")

regression_metrics = model_evaluation.regression_evaluation_metrics
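# As above, str() of an unset message is empty, so this checks whether
# the evaluation contains regression metrics.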
if str(regression_metrics):
    print("Model regression metrics:")
    print(f"Model RMSE: {regression_metrics.root_mean_squared_error}")
    print(f"Model MAE: {regression_metrics.mean_absolute_error}")
    print(f"Model MAPE: {regression_metrics.mean_absolute_percentage_error}")
    print(f"Model R^2: {regression_metrics.r_squared}")

What's next

To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.