Inspects text for known entities (proper nouns such as public figures and landmarks) and returns information about those entities.
Code sample
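The snippets below are excerpts from larger samples; each generally assumes that the Cloud Natural Language client library for that language is installed and that authentication (for example, through Application Default Credentials) is already configured.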
C#
private static void AnalyzeEntitiesFromText(string text)
{
    var client = LanguageServiceClient.Create();
    var response = client.AnalyzeEntities(new Document()
    {
        Content = text,
        Type = Document.Types.Type.PlainText
    });
    WriteEntities(response.Entities);
}

private static void WriteEntities(IEnumerable<Entity> entities)
{
    Console.WriteLine("Entities:");
    foreach (var entity in entities)
    {
        Console.WriteLine($"\tName: {entity.Name}");
        Console.WriteLine($"\tType: {entity.Type}");
        Console.WriteLine($"\tSalience: {entity.Salience}");
        Console.WriteLine("\tMentions:");
        foreach (var mention in entity.Mentions)
            Console.WriteLine($"\t\t{mention.Text.BeginOffset}: {mention.Text.Content}");
        Console.WriteLine("\tMetadata:");
        foreach (var keyval in entity.Metadata)
        {
            Console.WriteLine($"\t\t{keyval.Key}: {keyval.Value}");
        }
    }
}
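These methods are snippet excerpts: they are assumed to sit inside a class in a project that references the Google.Cloud.Language.V1 client library (with a matching using Google.Cloud.Language.V1; directive), and the caller supplies the text to analyze.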
Go
func analyzeEntities(ctx context.Context, client *language.Client, text string) (*languagepb.AnalyzeEntitiesResponse, error) {
	return client.AnalyzeEntities(ctx, &languagepb.AnalyzeEntitiesRequest{
		Document: &languagepb.Document{
			Source: &languagepb.Document_Content{
				Content: text,
			},
			Type: languagepb.Document_PLAIN_TEXT,
		},
		EncodingType: languagepb.EncodingType_UTF8,
	})
}
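The caller supplies the *language.Client, typically created with language.NewClient(ctx) from cloud.google.com/go/language/apiv1 and closed with client.Close() when no longer needed; the languagepb identifiers are the generated protobuf types for the Natural Language v1 API.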
Java
// Instantiate the Language client com.google.cloud.language.v1.LanguageServiceClient
try (LanguageServiceClient language = LanguageServiceClient.create()) {
  Document doc = Document.newBuilder().setContent(text).setType(Type.PLAIN_TEXT).build();
  AnalyzeEntitiesRequest request =
      AnalyzeEntitiesRequest.newBuilder()
          .setDocument(doc)
          .setEncodingType(EncodingType.UTF16)
          .build();
  AnalyzeEntitiesResponse response = language.analyzeEntities(request);

  // Print the response
  for (Entity entity : response.getEntitiesList()) {
    System.out.printf("Entity: %s\n", entity.getName());
    System.out.printf("Salience: %.3f\n", entity.getSalience());
    System.out.println("Metadata: ");
    for (Map.Entry<String, String> entry : entity.getMetadataMap().entrySet()) {
      System.out.printf("%s : %s\n", entry.getKey(), entry.getValue());
    }
    for (EntityMention mention : entity.getMentionsList()) {
      System.out.printf("Begin offset: %d\n", mention.getText().getBeginOffset());
      System.out.printf("Content: %s\n", mention.getText().getContent());
      System.out.printf("Type: %s\n\n", mention.getType());
    }
  }
}
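The snippet assumes a String variable named text is already in scope and that the classes used here (LanguageServiceClient, Document, AnalyzeEntitiesRequest, Entity, EntityMention, EncodingType) are imported from the com.google.cloud.language.v1 package referenced in the opening comment.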
Node.js
// Imports the Google Cloud client library
const language = require('@google-cloud/language');
// Creates a client
const client = new language.LanguageServiceClient();
/**
 * TODO(developer): Uncomment the following line to run this code.
 */
// const text = 'Your text to analyze, e.g. Hello, world!';

// Prepares a document, representing the provided text
const document = {
  content: text,
  type: 'PLAIN_TEXT',
};

// Detects entities in the document
const [result] = await client.analyzeEntities({document});
const entities = result.entities;

console.log('Entities:');
entities.forEach(entity => {
  console.log(entity.name);
  console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`);
  if (entity.metadata && entity.metadata.wikipedia_url) {
    console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}`);
  }
});
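Because this snippet uses await, it must run inside an async function or in an environment that supports top-level await; text must also be defined, as noted in the TODO above.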
PHP
use Google\Cloud\Language\V1\Document;
use Google\Cloud\Language\V1\Document\Type;
use Google\Cloud\Language\V1\LanguageServiceClient;
use Google\Cloud\Language\V1\Entity\Type as EntityType;
/** Uncomment and populate these variables in your code */
// $text = 'The text to analyze.';
// Create the Natural Language client
$languageServiceClient = new LanguageServiceClient();
try {
    // Create a new Document, add text as content and set type to PLAIN_TEXT
    $document = (new Document())
        ->setContent($text)
        ->setType(Type::PLAIN_TEXT);

    // Call the analyzeEntities function
    $response = $languageServiceClient->analyzeEntities($document, []);
    $entities = $response->getEntities();

    // Print out information about each entity
    foreach ($entities as $entity) {
        printf('Name: %s' . PHP_EOL, $entity->getName());
        printf('Type: %s' . PHP_EOL, EntityType::name($entity->getType()));
        printf('Salience: %s' . PHP_EOL, $entity->getSalience());
        if ($entity->getMetadata()->offsetExists('wikipedia_url')) {
            printf('Wikipedia URL: %s' . PHP_EOL, $entity->getMetadata()->offsetGet('wikipedia_url'));
        }
        if ($entity->getMetadata()->offsetExists('mid')) {
            printf('Knowledge Graph MID: %s' . PHP_EOL, $entity->getMetadata()->offsetGet('mid'));
        }
        printf(PHP_EOL);
    }
} finally {
    $languageServiceClient->close();
}
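This sample assumes the google/cloud-language Composer package is installed and its autoloader has been required before the snippet runs.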
Python
from google.cloud import language_v1

def sample_analyze_entities(text_content):
    """
    Analyzing Entities in a String

    Args:
      text_content The text content to analyze
    """

    client = language_v1.LanguageServiceClient()

    # text_content = 'California is a state.'

    # Available types: PLAIN_TEXT, HTML
    type_ = language_v1.Document.Type.PLAIN_TEXT

    # Optional. If not specified, the language is automatically detected.
    # For list of supported languages:
    # https://cloud.google.com/natural-language/docs/languages
    language = "en"
    document = {"content": text_content, "type_": type_, "language": language}

    # Available values: NONE, UTF8, UTF16, UTF32
    encoding_type = language_v1.EncodingType.UTF8

    response = client.analyze_entities(request={'document': document, 'encoding_type': encoding_type})

    # Loop through entities returned from the API
    for entity in response.entities:
        print(u"Representative name for the entity: {}".format(entity.name))

        # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
        print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))

        # Get the salience score associated with the entity in the [0, 1.0] range
        print(u"Salience score: {}".format(entity.salience))

        # Loop over the metadata associated with entity. For many known entities,
        # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid).
        # Some entity types may have additional metadata, e.g. ADDRESS entities
        # may have metadata for the address street_name, postal_code, et al.
        for metadata_name, metadata_value in entity.metadata.items():
            print(u"{}: {}".format(metadata_name, metadata_value))

        # Loop over the mentions of this entity in the input document.
        # The API currently supports proper noun mentions.
        for mention in entity.mentions:
            print(u"Mention text: {}".format(mention.text.content))

            # Get the mention type, e.g. PROPER for proper noun
            print(
                u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
            )

    # Get the language of the text, which will be the same as
    # the language specified in the request or, if not specified,
    # the automatically-detected language.
    print(u"Language of the text: {}".format(response.language))
Ruby
# text_content = "Text to extract entities from"
require "google/cloud/language"
language = Google::Cloud::Language.language_service
document = { content: text_content, type: :PLAIN_TEXT }
response = language.analyze_entities document: document
entities = response.entities
entities.each do |entity|
  puts "Entity #{entity.name} #{entity.type}"
  puts "URL: #{entity.metadata['wikipedia_url']}" if entity.metadata["wikipedia_url"]
end