public interface SafetyRatingOrBuilder extends MessageOrBuilder
Implements
MessageOrBuilder

Methods
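All of the accessors below are read-only views of a SafetyRating message, which implements this interface. The snippet below is a minimal, hypothetical sketch of reading ratings from a GenerateContentResponse; it assumes the response comes from PredictionServiceClient.generateContent, which is outside the scope of this reference.

```java
import com.google.cloud.aiplatform.v1.Candidate;
import com.google.cloud.aiplatform.v1.GenerateContentResponse;
import com.google.cloud.aiplatform.v1.SafetyRating;

public class SafetyRatingExample {
  // Assumption: 'response' was returned by a
  // PredictionServiceClient.generateContent(...) call (not shown here).
  static void printRatings(GenerateContentResponse response) {
    for (Candidate candidate : response.getCandidatesList()) {
      // Each SafetyRating implements SafetyRatingOrBuilder.
      for (SafetyRating rating : candidate.getSafetyRatingsList()) {
        System.out.printf(
            "category=%s probability=%s blocked=%b%n",
            rating.getCategory(), rating.getProbability(), rating.getBlocked());
      }
    }
  }
}
```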
getBlocked()
public abstract boolean getBlocked()
Output only. Indicates whether the content was filtered out because of this rating.
bool blocked = 3 [(.google.api.field_behavior) = OUTPUT_ONLY];
Returns

| Type | Description |
| --- | --- |
| boolean | The blocked. |
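A common use of this accessor is to check whether any rating on a candidate caused filtering. A minimal sketch, assuming the list was obtained from Candidate.getSafetyRatingsList():

```java
import com.google.cloud.aiplatform.v1.SafetyRating;
import java.util.List;

class BlockedCheck {
  // Returns true if any rating reports that it caused the content to be filtered out.
  static boolean wasFiltered(List<SafetyRating> ratings) {
    return ratings.stream().anyMatch(SafetyRating::getBlocked);
  }
}
```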
getCategory()
public abstract HarmCategory getCategory()
Output only. Harm category.
.google.cloud.aiplatform.v1.HarmCategory category = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
Returns

| Type | Description |
| --- | --- |
| HarmCategory | The category. |
getCategoryValue()
public abstract int getCategoryValue()
Output only. Harm category.
.google.cloud.aiplatform.v1.HarmCategory category = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
Returns

| Type | Description |
| --- | --- |
| int | The enum numeric value on the wire for category. |
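The difference between getCategory() and getCategoryValue() follows the usual protobuf-generated pattern: the former maps the wire number to the HarmCategory enum (falling back to UNRECOGNIZED for values unknown to this client version), while the latter exposes the raw number. A minimal sketch:

```java
import com.google.cloud.aiplatform.v1.HarmCategory;
import com.google.cloud.aiplatform.v1.SafetyRating;

class CategoryAccess {
  static void describeCategory(SafetyRating rating) {
    // getCategory() maps the wire value to the HarmCategory enum; values not
    // known to this client version come back as HarmCategory.UNRECOGNIZED.
    HarmCategory category = rating.getCategory();

    // getCategoryValue() returns the raw numeric value as sent on the wire,
    // which is preserved even when the enum constant is UNRECOGNIZED.
    int wireValue = rating.getCategoryValue();

    System.out.printf("category=%s (wire value %d)%n", category, wireValue);
  }
}
```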
getProbability()
public abstract SafetyRating.HarmProbability getProbability()
Output only. Harm probability levels in the content.
.google.cloud.aiplatform.v1.SafetyRating.HarmProbability probability = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];
Returns

| Type | Description |
| --- | --- |
| SafetyRating.HarmProbability | The probability. |
getProbabilityScore()
public abstract float getProbabilityScore()
Output only. Harm probability score.
float probability_score = 5 [(.google.api.field_behavior) = OUTPUT_ONLY];
Returns

| Type | Description |
| --- | --- |
| float | The probabilityScore. |
getProbabilityValue()
public abstract int getProbabilityValue()
Output only. Harm probability levels in the content.
.google.cloud.aiplatform.v1.SafetyRating.HarmProbability probability = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];
Returns

| Type | Description |
| --- | --- |
| int | The enum numeric value on the wire for probability. |
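The three probability accessors expose the same rating at different levels of detail: the enum level, its raw wire number, and the floating-point score. A minimal sketch reading all three:

```java
import com.google.cloud.aiplatform.v1.SafetyRating;
import com.google.cloud.aiplatform.v1.SafetyRating.HarmProbability;

class ProbabilityAccess {
  static void describeProbability(SafetyRating rating) {
    HarmProbability probability = rating.getProbability(); // enum level
    int wireValue = rating.getProbabilityValue();          // raw wire number
    float score = rating.getProbabilityScore();            // numeric score

    System.out.printf(
        "probability=%s (wire value %d, score %.3f)%n", probability, wireValue, score);
  }
}
```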
getSeverity()
public abstract SafetyRating.HarmSeverity getSeverity()
Output only. Harm severity levels in the content.
.google.cloud.aiplatform.v1.SafetyRating.HarmSeverity severity = 6 [(.google.api.field_behavior) = OUTPUT_ONLY];
Returns

| Type | Description |
| --- | --- |
| SafetyRating.HarmSeverity | The severity. |
getSeverityScore()
public abstract float getSeverityScore()
Output only. Harm severity score.
float severity_score = 7 [(.google.api.field_behavior) = OUTPUT_ONLY];
Returns

| Type | Description |
| --- | --- |
| float | The severityScore. |
getSeverityValue()
public abstract int getSeverityValue()
Output only. Harm severity levels in the content.
.google.cloud.aiplatform.v1.SafetyRating.HarmSeverity severity = 6 [(.google.api.field_behavior) = OUTPUT_ONLY];
Returns

| Type | Description |
| --- | --- |
| int | The enum numeric value on the wire for severity. |
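Because both SafetyRating and SafetyRating.Builder implement this interface, code can accept a SafetyRatingOrBuilder and read severity from either. A minimal sketch (the builder values here are illustrative only; in practice these fields are output only and populated by the service):

```java
import com.google.cloud.aiplatform.v1.HarmCategory;
import com.google.cloud.aiplatform.v1.SafetyRating;
import com.google.cloud.aiplatform.v1.SafetyRatingOrBuilder;

class SeverityAccess {
  // Works for both a built SafetyRating and a SafetyRating.Builder,
  // since both implement SafetyRatingOrBuilder.
  static String describeSeverity(SafetyRatingOrBuilder rating) {
    return String.format(
        "severity=%s (wire value %d, score %.3f)",
        rating.getSeverity(), rating.getSeverityValue(), rating.getSeverityScore());
  }

  public static void main(String[] args) {
    // Illustrative builder only; real ratings come back from the service.
    SafetyRating.Builder builder =
        SafetyRating.newBuilder().setCategory(HarmCategory.HARM_CATEGORY_UNSPECIFIED);
    System.out.println(describeSeverity(builder));
  }
}
```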