This legacy version of AutoML Tables is deprecated and will no longer be available on Google Cloud after January 23, 2024. All the functionality of legacy AutoML Tables and new features are available on the Vertex AI platform. See Migrate to Vertex AI to learn how to migrate your resources.

List model evaluations


Demonstrates how to list the evaluations for a model.

Explore further

For detailed documentation that includes this code sample, see the AutoML Tables documentation.

Code sample

Java

import com.google.cloud.automl.v1beta1.AutoMlClient;
import com.google.cloud.automl.v1beta1.ListModelEvaluationsRequest;
import com.google.cloud.automl.v1beta1.ModelEvaluation;
import com.google.cloud.automl.v1beta1.ModelName;
import java.io.IOException;

class ListModelEvaluations {

  public static void main(String[] args) throws IOException {
    // TODO(developer): Replace these variables before running the sample.
    String projectId = "YOUR_PROJECT_ID";
    String modelId = "YOUR_MODEL_ID";
    listModelEvaluations(projectId, modelId);
  }

  // List model evaluations
  static void listModelEvaluations(String projectId, String modelId) throws IOException {
    // Initialize client that will be used to send requests. This client only needs to be created
    // once, and can be reused for multiple requests. After completing all of your requests, call
    // the "close" method on the client to safely clean up any remaining background resources.
    try (AutoMlClient client = AutoMlClient.create()) {
      // Get the full path of the model.
      ModelName modelFullId = ModelName.of(projectId, "us-central1", modelId);
      ListModelEvaluationsRequest modelEvaluationsRequest =
          ListModelEvaluationsRequest.newBuilder().setParent(modelFullId.toString()).build();

      // List all the model evaluations in the model.
      System.out.println("List of model evaluations:");
      for (ModelEvaluation modelEvaluation :
          client.listModelEvaluations(modelEvaluationsRequest).iterateAll()) {

        System.out.format("Model Evaluation Name: %s%n", modelEvaluation.getName());
        System.out.format("Model Annotation Spec Id: %s", modelEvaluation.getAnnotationSpecId());
        System.out.println("Create Time:");
        System.out.format("\tseconds: %s%n", modelEvaluation.getCreateTime().getSeconds());
        System.out.format("\tnanos: %s", modelEvaluation.getCreateTime().getNanos() / 1e9);
        System.out.format(
            "Evalution Example Count: %d%n", modelEvaluation.getEvaluatedExampleCount());

        System.out.format(
            "Tables Model Evaluation Metrics: %s%n",
            modelEvaluation.getClassificationEvaluationMetrics());
      }
    }
  }
}
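
The Java sample lists every evaluation for the model. To restrict the results the way the Node.js sample does, you can also set a filter expression on the request builder (the v1beta1 request message includes a filter field), for example: ListModelEvaluationsRequest.newBuilder().setParent(modelFullId.toString()).setFilter(filter).build().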

Node.js

const automl = require('@google-cloud/automl');
const math = require('mathjs');
const client = new automl.v1beta1.AutoMlClient();

/**
 * Demonstrates using the AutoML client to list model evaluations.
 * TODO(developer): Uncomment the following lines before running the sample.
 */
// const projectId = '[PROJECT_ID]' e.g., "my-gcloud-project";
// const computeRegion = '[REGION_NAME]' e.g., "us-central1";
// const modelId = '[MODEL_ID]' e.g., "TBL4704590352927948800";
// const filter = '[FILTER_EXPRESSIONS]' e.g., "tablesModelMetadata:*";

// Get the full path of the model.
const modelFullId = client.modelPath(projectId, computeRegion, modelId);

// List all the model evaluations in the model by applying filter.
client
  .listModelEvaluations({parent: modelFullId, filter: filter})
  .then(responses => {
    const element = responses[0];
    console.log('List of model evaluations:');
    for (let i = 0; i < element.length; i++) {
      const classMetrics = element[i].classificationEvaluationMetrics;
      const regressionMetrics = element[i].regressionEvaluationMetrics;
      // The evaluation ID is the final segment of the resource name.
      const evaluationId = element[i].name.split('/')[7];

      console.log(`Model evaluation name: ${element[i].name}`);
      console.log(`Model evaluation Id: ${evaluationId}`);
      console.log(
        `Model evaluation annotation spec Id: ${element[i].annotationSpecId}`
      );
      console.log(`Model evaluation display name: ${element[i].displayName}`);
      console.log(
        `Model evaluation example count: ${element[i].evaluatedExampleCount}`
      );

      if (classMetrics) {
        const confidenceMetricsEntries = classMetrics.confidenceMetricsEntry;

        console.log('Table classification evaluation metrics:');
        console.log(`\tModel auPrc: ${math.round(classMetrics.auPrc, 6)}`);
        console.log(`\tModel auRoc: ${math.round(classMetrics.auRoc, 6)}`);
        console.log(
          `\tModel log loss: ${math.round(classMetrics.logLoss, 6)}`
        );

        if (confidenceMetricsEntries.length > 0) {
          console.log('\tConfidence metrics entries:');

          for (const confidenceMetricsEntry of confidenceMetricsEntries) {
            console.log(
              `\t\tModel confidence threshold: ${math.round(
                confidenceMetricsEntry.confidenceThreshold,
                6
              )}`
            );
            console.log(
              `\t\tModel position threshold: ${math.round(
                confidenceMetricsEntry.positionThreshold,
                4
              )}`
            );
            console.log(
              `\t\tModel recall: ${math.round(
                confidenceMetricsEntry.recall * 100,
                2
              )} %`
            );
            console.log(
              `\t\tModel precision: ${math.round(
                confidenceMetricsEntry.precision * 100,
                2
              )} %`
            );
            console.log(
              `\t\tModel false positive rate: ${confidenceMetricsEntry.falsePositiveRate}`
            );
            console.log(
              `\t\tModel f1 score: ${math.round(
                confidenceMetricsEntry.f1Score * 100,
                2
              )} %`
            );
            console.log(
              `\t\tModel recall@1: ${math.round(
                confidenceMetricsEntry.recallAt1 * 100,
                2
              )} %`
            );
            console.log(
              `\t\tModel precision@1: ${math.round(
                confidenceMetricsEntry.precisionAt1 * 100,
                2
              )} %`
            );
            console.log(
              `\t\tModel false positive rate@1: ${confidenceMetricsEntry.falsePositiveRateAt1}`
            );
            console.log(
              `\t\tModel f1 score@1: ${math.round(
                confidenceMetricsEntry.f1ScoreAt1 * 100,
                2
              )} %`
            );
            console.log(
              `\t\tModel true positive count: ${confidenceMetricsEntry.truePositiveCount}`
            );
            console.log(
              `\t\tModel false positive count: ${confidenceMetricsEntry.falsePositiveCount}`
            );
            console.log(
              `\t\tModel false negative count: ${confidenceMetricsEntry.falseNegativeCount}`
            );
            console.log(
              `\t\tModel true negative count: ${confidenceMetricsEntry.trueNegativeCount}`
            );
            console.log('\n');
          }
        }
        console.log(
          `\tModel annotation spec Id: ${classMetrics.annotationSpecId}`
        );
      } else if (regressionMetrics) {
        console.log('Table regression evaluation metrics:');
        console.log(
          `\tModel root mean squared error: ${regressionMetrics.rootMeanSquaredError}`
        );
        console.log(
          `\tModel mean absolute error: ${regressionMetrics.meanAbsoluteError}`
        );
        console.log(
          `\tModel mean absolute percentage error: ${regressionMetrics.meanAbsolutePercentageError}`
        );
        console.log(`\tModel rSquared: ${regressionMetrics.rSquared}`);
      }
      console.log('\n');
    }
  })
  .catch(err => {
    console.error(err);
  });

Python

# TODO(developer): Uncomment and set the following variables
# project_id = 'PROJECT_ID_HERE'
# compute_region = 'COMPUTE_REGION_HERE'
# model_display_name = 'MODEL_DISPLAY_NAME_HERE'
# filter = 'filter expression here'

from google.cloud import automl_v1beta1 as automl

client = automl.TablesClient(project=project_id, region=compute_region)

# List all the model evaluations in the model by applying filter.
response = client.list_model_evaluations(
    model_display_name=model_display_name, filter=filter
)

print("List of model evaluations:")
for evaluation in response:
    print("Model evaluation name: {}".format(evaluation.name))
    print("Model evaluation id: {}".format(evaluation.name.split("/")[-1]))
    print(
        "Model evaluation example count: {}".format(
            evaluation.evaluated_example_count
        )
    )
    print("Model evaluation time: {}".format(evaluation.create_time))
    print("\n")

What's next

To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.