public static final class ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder extends GeneratedMessageV3.Builder<ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder> implements ClassificationEvaluationMetrics.ConfidenceMetricsEntryOrBuilder
Metrics for a single confidence threshold.
Protobuf type
google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry
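
A minimal sketch of typical builder usage is shown below. It assumes the standard protobuf-generated newBuilder() factory on the message class (not documented on this page); every setter and the build() call it uses are documented further down. All fields of this message are output only, so in practice the values are read from an evaluation returned by the service rather than set by hand; the numbers here are purely illustrative.

```java
import com.google.cloud.automl.v1.ClassificationEvaluationMetrics;

public class ConfidenceMetricsEntryExample {
  public static void main(String[] args) {
    // Assemble an entry describing metrics at a 0.5 confidence threshold.
    ClassificationEvaluationMetrics.ConfidenceMetricsEntry entry =
        ClassificationEvaluationMetrics.ConfidenceMetricsEntry.newBuilder()
            .setConfidenceThreshold(0.5f)
            .setPrecision(0.90f)
            .setRecall(0.80f)
            .setF1Score(0.847f)          // harmonic mean of precision and recall
            .setTruePositiveCount(80L)
            .setFalsePositiveCount(9L)
            .setFalseNegativeCount(20L)
            .build();

    System.out.println(entry.getF1Score());
  }
}
```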
Inheritance
Object > AbstractMessageLite.Builder<MessageType,BuilderType> > AbstractMessage.Builder<BuilderType> > GeneratedMessageV3.Builder > ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder

Static Methods
getDescriptor()
public static final Descriptors.Descriptor getDescriptor()
Type | Description |
Descriptor |
Methods
addRepeatedField(Descriptors.FieldDescriptor field, Object value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder addRepeatedField(Descriptors.FieldDescriptor field, Object value)
Name | Description |
field | FieldDescriptor |
value | Object |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder |
build()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry build()
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry |
buildPartial()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry buildPartial()
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry |
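
As a hedged note on the two build methods: in generated protobuf code, build() validates isInitialized() and fails on an uninitialized message, while buildPartial() skips that check. This message declares no required fields, so both calls behave the same here; a small sketch (newBuilder() assumed, as above):

```java
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder builder =
    ClassificationEvaluationMetrics.ConfidenceMetricsEntry.newBuilder()
        .setConfidenceThreshold(0.5f);

// build() validates initialization before returning the message;
// buildPartial() returns whatever has been set so far without validating.
ClassificationEvaluationMetrics.ConfidenceMetricsEntry a = builder.build();
ClassificationEvaluationMetrics.ConfidenceMetricsEntry b = builder.buildPartial();
```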
clear()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clear()
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder |
clearConfidenceThreshold()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearConfidenceThreshold()
Output only. Metrics are computed with the assumption that the model never returns predictions with a score lower than this value.
float confidence_threshold = 1;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearF1Score()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearF1Score()
Output only. The harmonic mean of recall and precision.
float f1_score = 4;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearF1ScoreAt1()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearF1ScoreAt1()
Output only. The harmonic mean of recall_at1 and precision_at1.
float f1_score_at1 = 7;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearFalseNegativeCount()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearFalseNegativeCount()
Output only. The number of ground truth labels that are not matched by a model created label.
int64 false_negative_count = 12;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearFalsePositiveCount()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearFalsePositiveCount()
Output only. The number of model created labels that do not match a ground truth label.
int64 false_positive_count = 11;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearFalsePositiveRate()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearFalsePositiveRate()
Output only. False Positive Rate for the given confidence threshold.
float false_positive_rate = 8;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearFalsePositiveRateAt1()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearFalsePositiveRateAt1()
Output only. The False Positive Rate when only considering, for each example, the label with the highest prediction score that is not below the confidence threshold.
float false_positive_rate_at1 = 9;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearField(Descriptors.FieldDescriptor field)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearField(Descriptors.FieldDescriptor field)
Name | Description |
field | FieldDescriptor |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder |
clearOneof(Descriptors.OneofDescriptor oneof)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearOneof(Descriptors.OneofDescriptor oneof)
Name | Description |
oneof | OneofDescriptor |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder |
clearPositionThreshold()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearPositionThreshold()
Output only. Metrics are computed with the assumption that the model always returns at most this many predictions (ordered by score, descending), all of which still need to meet the confidence_threshold.
int32 position_threshold = 14;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearPrecision()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearPrecision()
Output only. Precision for the given confidence threshold.
float precision = 3;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearPrecisionAt1()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearPrecisionAt1()
Output only. The precision when only considering, for each example, the label with the highest prediction score that is not below the confidence threshold.
float precision_at1 = 6;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearRecall()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearRecall()
Output only. Recall (True Positive Rate) for the given confidence threshold.
float recall = 2;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearRecallAt1()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearRecallAt1()
Output only. The Recall (True Positive Rate) when only considering, for each example, the label with the highest prediction score that is not below the confidence threshold.
float recall_at1 = 5;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearTrueNegativeCount()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearTrueNegativeCount()
Output only. The number of labels that were not created by the model and that would not have matched a ground truth label if they had been.
int64 true_negative_count = 13;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clearTruePositiveCount()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clearTruePositiveCount()
Output only. The number of model created labels that match a ground truth label.
int64 true_positive_count = 10;
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
clone()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder clone()
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder |
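
A sketch of how clone() can be used to branch a partially populated builder, for example to produce entries that differ only in the confidence threshold (newBuilder() assumed, as above):

```java
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder base =
    ClassificationEvaluationMetrics.ConfidenceMetricsEntry.newBuilder()
        .setPositionThreshold(1);

// clone() copies the builder's current state; edits to one copy do not
// affect the other.
ClassificationEvaluationMetrics.ConfidenceMetricsEntry lowThreshold =
    base.clone().setConfidenceThreshold(0.1f).build();
ClassificationEvaluationMetrics.ConfidenceMetricsEntry highThreshold =
    base.clone().setConfidenceThreshold(0.9f).build();
```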
getConfidenceThreshold()
public float getConfidenceThreshold()
Output only. Metrics are computed with the assumption that the model never returns predictions with a score lower than this value.
float confidence_threshold = 1;
Type | Description |
float | The confidenceThreshold. |
getDefaultInstanceForType()
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry getDefaultInstanceForType()
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry |
getDescriptorForType()
public Descriptors.Descriptor getDescriptorForType()
Type | Description |
Descriptor |
getF1Score()
public float getF1Score()
Output only. The harmonic mean of recall and precision.
float f1_score = 4;
Type | Description |
float | The f1Score. |
getF1ScoreAt1()
public float getF1ScoreAt1()
Output only. The harmonic mean of recall_at1 and precision_at1.
float f1_score_at1 = 7;
Type | Description |
float | The f1ScoreAt1. |
getFalseNegativeCount()
public long getFalseNegativeCount()
Output only. The number of ground truth labels that are not matched by a model created label.
int64 false_negative_count = 12;
Type | Description |
long | The falseNegativeCount. |
getFalsePositiveCount()
public long getFalsePositiveCount()
Output only. The number of model created labels that do not match a ground truth label.
int64 false_positive_count = 11;
Type | Description |
long | The falsePositiveCount. |
getFalsePositiveRate()
public float getFalsePositiveRate()
Output only. False Positive Rate for the given confidence threshold.
float false_positive_rate = 8;
Type | Description |
float | The falsePositiveRate. |
getFalsePositiveRateAt1()
public float getFalsePositiveRateAt1()
Output only. The False Positive Rate when only considering, for each example, the label with the highest prediction score that is not below the confidence threshold.
float false_positive_rate_at1 = 9;
Type | Description |
float | The falsePositiveRateAt1. |
getPositionThreshold()
public int getPositionThreshold()
Output only. Metrics are computed with the assumption that the model always returns at most this many predictions (ordered by score, descending), all of which still need to meet the confidence_threshold.
int32 position_threshold = 14;
Type | Description |
int | The positionThreshold. |
getPrecision()
public float getPrecision()
Output only. Precision for the given confidence threshold.
float precision = 3;
Type | Description |
float | The precision. |
getPrecisionAt1()
public float getPrecisionAt1()
Output only. The precision when only considering, for each example, the label with the highest prediction score that is not below the confidence threshold.
float precision_at1 = 6;
Type | Description |
float | The precisionAt1. |
getRecall()
public float getRecall()
Output only. Recall (True Positive Rate) for the given confidence threshold.
float recall = 2;
Type | Description |
float | The recall. |
getRecallAt1()
public float getRecallAt1()
Output only. The Recall (True Positive Rate) when only considering, for each example, the label with the highest prediction score that is not below the confidence threshold.
float recall_at1 = 5;
Type | Description |
float | The recallAt1. |
getTrueNegativeCount()
public long getTrueNegativeCount()
Output only. The number of labels that were not created by the model and that would not have matched a ground truth label if they had been.
int64 true_negative_count = 13;
Type | Description |
long | The trueNegativeCount. |
getTruePositiveCount()
public long getTruePositiveCount()
Output only. The number of model created labels that match a ground truth label.
int64 true_positive_count = 10;
Type | Description |
long | The truePositiveCount. |
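
The getters above relate to each other in the usual way: precision = TP / (TP + FP), recall = TP / (TP + FN), and f1_score is the harmonic mean of the two. A hedged sketch that reads an entry (however it was obtained) and recomputes the rates from the raw counts:

```java
static void printEntry(ClassificationEvaluationMetrics.ConfidenceMetricsEntry entry) {
  long tp = entry.getTruePositiveCount();
  long fp = entry.getFalsePositiveCount();
  long fn = entry.getFalseNegativeCount();

  // Values as reported in the entry.
  System.out.printf("threshold=%.2f precision=%.3f recall=%.3f f1=%.3f%n",
      entry.getConfidenceThreshold(), entry.getPrecision(),
      entry.getRecall(), entry.getF1Score());

  // The same quantities recomputed from the counts (assumes non-zero
  // denominators); these should agree with the reported values up to
  // float rounding.
  double precision = tp / (double) (tp + fp);
  double recall = tp / (double) (tp + fn);
  double f1 = 2 * precision * recall / (precision + recall);
  System.out.printf("recomputed precision=%.3f recall=%.3f f1=%.3f%n",
      precision, recall, f1);
}
```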
internalGetFieldAccessorTable()
protected GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
Type | Description |
FieldAccessorTable |
isInitialized()
public final boolean isInitialized()
Type | Description |
boolean |
mergeFrom(ClassificationEvaluationMetrics.ConfidenceMetricsEntry other)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder mergeFrom(ClassificationEvaluationMetrics.ConfidenceMetricsEntry other)
Name | Description |
other | ClassificationEvaluationMetrics.ConfidenceMetricsEntry |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder |
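
A sketch of merging one entry into another. toBuilder() is the standard protobuf-generated way to obtain a builder pre-populated from an existing message (it is not documented on this page); for singular scalar fields, mergeFrom overwrites the target only where the other message carries a non-default value.

```java
static ClassificationEvaluationMetrics.ConfidenceMetricsEntry overlay(
    ClassificationEvaluationMetrics.ConfidenceMetricsEntry existing,
    ClassificationEvaluationMetrics.ConfidenceMetricsEntry update) {
  // toBuilder() yields a builder pre-populated from 'existing'; mergeFrom
  // then overwrites singular scalar fields wherever 'update' carries a
  // non-default value.
  return existing.toBuilder()
      .mergeFrom(update)
      .build();
}
```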
mergeFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder mergeFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)
Name | Description |
input | CodedInputStream |
extensionRegistry | ExtensionRegistryLite |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder |
Exceptions
Type | Description |
IOException |
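
A hedged sketch of parsing wire-format bytes through this overload. CodedInputStream.newInstance and ExtensionRegistryLite.getEmptyRegistry are standard protobuf APIs, and newBuilder() is assumed as above; the generated parseFrom(byte[]) shortcut would normally be used instead, but the overload documented here is shown for completeness.

```java
import com.google.protobuf.CodedInputStream;
import com.google.protobuf.ExtensionRegistryLite;
import java.io.IOException;

static ClassificationEvaluationMetrics.ConfidenceMetricsEntry parseEntry(byte[] bytes)
    throws IOException {
  ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder builder =
      ClassificationEvaluationMetrics.ConfidenceMetricsEntry.newBuilder();
  // Throws an IOException (InvalidProtocolBufferException) on malformed input.
  builder.mergeFrom(CodedInputStream.newInstance(bytes),
      ExtensionRegistryLite.getEmptyRegistry());
  return builder.build();
}
```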
mergeFrom(Message other)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder mergeFrom(Message other)
Name | Description |
other | Message |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder |
mergeUnknownFields(UnknownFieldSet unknownFields)
public final ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder mergeUnknownFields(UnknownFieldSet unknownFields)
Name | Description |
unknownFields | UnknownFieldSet |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder |
setConfidenceThreshold(float value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setConfidenceThreshold(float value)
Output only. Metrics are computed with the assumption that the model never returns predictions with a score lower than this value.
float confidence_threshold = 1;
Name | Description |
value | float The confidenceThreshold to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setF1Score(float value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setF1Score(float value)
Output only. The harmonic mean of recall and precision.
float f1_score = 4;
Name | Description |
value | float The f1Score to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setF1ScoreAt1(float value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setF1ScoreAt1(float value)
Output only. The harmonic mean of recall_at1 and precision_at1.
float f1_score_at1 = 7;
Name | Description |
value | float The f1ScoreAt1 to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setFalseNegativeCount(long value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setFalseNegativeCount(long value)
Output only. The number of ground truth labels that are not matched by a model created label.
int64 false_negative_count = 12;
Name | Description |
value | long The falseNegativeCount to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setFalsePositiveCount(long value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setFalsePositiveCount(long value)
Output only. The number of model created labels that do not match a ground truth label.
int64 false_positive_count = 11;
Name | Description |
value | long The falsePositiveCount to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setFalsePositiveRate(float value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setFalsePositiveRate(float value)
Output only. False Positive Rate for the given confidence threshold.
float false_positive_rate = 8;
Name | Description |
value | float The falsePositiveRate to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setFalsePositiveRateAt1(float value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setFalsePositiveRateAt1(float value)
Output only. The False Positive Rate when only considering, for each example, the label with the highest prediction score that is not below the confidence threshold.
float false_positive_rate_at1 = 9;
Name | Description |
value | float The falsePositiveRateAt1 to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setField(Descriptors.FieldDescriptor field, Object value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setField(Descriptors.FieldDescriptor field, Object value)
Name | Description |
field | FieldDescriptor |
value | Object |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder |
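
For dynamic access, setField and clearField can be driven by a FieldDescriptor looked up on the message descriptor. A sketch assuming the standard Descriptors.Descriptor.findFieldByName lookup and the newBuilder() factory noted above; field names are those of the proto definition quoted throughout this page.

```java
import com.google.protobuf.Descriptors;

ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder builder =
    ClassificationEvaluationMetrics.ConfidenceMetricsEntry.newBuilder();

// getDescriptorForType() is documented above; findFieldByName resolves the
// proto field name to a FieldDescriptor.
Descriptors.FieldDescriptor recallField =
    builder.getDescriptorForType().findFieldByName("recall");

builder.setField(recallField, 0.75f);   // boxed Float for a float field
builder.clearField(recallField);        // reverses the reflective set
```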
setPositionThreshold(int value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setPositionThreshold(int value)
Output only. Metrics are computed with the assumption that the model always returns at most this many predictions (ordered by score, descending), all of which still need to meet the confidence_threshold.
int32 position_threshold = 14;
Name | Description |
value | int The positionThreshold to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setPrecision(float value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setPrecision(float value)
Output only. Precision for the given confidence threshold.
float precision = 3;
Name | Description |
value | float The precision to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setPrecisionAt1(float value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setPrecisionAt1(float value)
Output only. The precision when only considering, for each example, the label with the highest prediction score that is not below the confidence threshold.
float precision_at1 = 6;
Name | Description |
value | float The precisionAt1 to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setRecall(float value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setRecall(float value)
Output only. Recall (True Positive Rate) for the given confidence threshold.
float recall = 2;
Name | Description |
value | float The recall to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setRecallAt1(float value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setRecallAt1(float value)
Output only. The Recall (True Positive Rate) when only considering, for each example, the label with the highest prediction score that is not below the confidence threshold.
float recall_at1 = 5;
Name | Description |
value | float The recallAt1 to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setRepeatedField(Descriptors.FieldDescriptor field, int index, Object value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setRepeatedField(Descriptors.FieldDescriptor field, int index, Object value)
Name | Description |
field | FieldDescriptor |
index | int |
value | Object |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder |
setTrueNegativeCount(long value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setTrueNegativeCount(long value)
Output only. The number of labels that were not created by the model and that would not have matched a ground truth label if they had been.
int64 true_negative_count = 13;
Name | Description |
value | long The trueNegativeCount to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setTruePositiveCount(long value)
public ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setTruePositiveCount(long value)
Output only. The number of model created labels that match a ground truth label.
int64 true_positive_count = 10;
Name | Description |
value | long The truePositiveCount to set. |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder | This builder for chaining. |
setUnknownFields(UnknownFieldSet unknownFields)
public final ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder setUnknownFields(UnknownFieldSet unknownFields)
Name | Description |
unknownFields | UnknownFieldSet |
Type | Description |
ClassificationEvaluationMetrics.ConfidenceMetricsEntry.Builder |