Detecting Objects

Object localization detects and extracts multiple objects in an image. For each object it identifies, the Vision API returns a LocalizedObjectAnnotation.
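
Each LocalizedObjectAnnotation contains a machine-generated identifier (mid), the object's name, a confidence score, and a bounding polygon expressed as normalized vertices. For example, here is a single annotation, abridged from the sample response at the end of this page:

{
  "mid": "/m/01bqk0",
  "name": "Bicycle wheel",
  "score": 0.89648587,
  "boundingPoly": {
    "normalizedVertices": [
      {"x": 0.32076266, "y": 0.78941387},
      {"x": 0.43812272, "y": 0.78941387},
      {"x": 0.43812272, "y": 0.97331065},
      {"x": 0.32076266, "y": 0.97331065}
    ]
  }
}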

Localizing objects in a local image

C#

Before trying this sample, follow the C# setup instructions in the Vision API Quickstart Using Client Libraries. For more information, see the Vision API C# API reference documentation.

            // Load the local image file (filePath is a placeholder for the path to your image).
            var image = Image.FromFile(filePath);
            var client = ImageAnnotatorClient.Create();
            var response = client.DetectLocalizedObjects(image);

            Console.WriteLine($"Number of objects found: {response.Count}");
            foreach (var localizedObject in response)
            {
                Console.Write($"\n{localizedObject.Name}");
                Console.WriteLine($" (confidence: {localizedObject.Score})");
                Console.WriteLine("Normalized bounding polygon vertices: ");

                foreach (var vertex
                        in localizedObject.BoundingPoly.NormalizedVertices)
                {
                    Console.WriteLine($" - ({vertex.X}, {vertex.Y})");
                }
            }

Go

Before trying this sample, follow the Go setup instructions in the Vision API Quickstart Using Client Libraries. For more information, see the Vision API Go API reference documentation.

// localizeObjects gets objects and bounding boxes from the Vision API for an image at the given file path.
func localizeObjects(w io.Writer, file string) error {
	ctx := context.Background()

	client, err := vision.NewImageAnnotatorClient(ctx)
	if err != nil {
		return err
	}

	f, err := os.Open(file)
	if err != nil {
		return err
	}
	defer f.Close()

	image, err := vision.NewImageFromReader(f)
	if err != nil {
		return err
	}
	annotations, err := client.LocalizeObjects(ctx, image, nil)
	if err != nil {
		return err
	}

	if len(annotations) == 0 {
		fmt.Fprintln(w, "No objects found.")
		return nil
	}

	fmt.Fprintln(w, "Objects:")
	for _, annotation := range annotations {
		fmt.Fprintln(w, annotation.Name)
		fmt.Fprintln(w, annotation.Score)

		for _, v := range annotation.BoundingPoly.NormalizedVertices {
			fmt.Fprintf(w, "(%f,%f)\n", v.X, v.Y)
		}
	}

	return nil
}

Java

Before trying this sample, follow the Java setup instructions in the Vision API Quickstart Using Client Libraries. For more information, see the Vision API Java API reference documentation.

/**
 * Detects localized objects in the specified local image.
 *
 * @param filePath The path to the file to perform localized object detection on.
 * @param out A {@link PrintStream} to write detected objects to.
 * @throws Exception on errors while closing the client.
 * @throws IOException on Input/Output errors.
 */
public static void detectLocalizedObjects(String filePath, PrintStream out)
    throws Exception, IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

  Image img = Image.newBuilder().setContent(imgBytes).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder()
          .addFeatures(Feature.newBuilder().setType(Type.OBJECT_LOCALIZATION))
          .setImage(img)
          .build();
  requests.add(request);

  // Perform the request
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    // Display the results
    for (AnnotateImageResponse res : responses) {
      for (LocalizedObjectAnnotation entity : res.getLocalizedObjectAnnotationsList()) {
        out.format("Object name: %s\n", entity.getName());
        out.format("Confidence: %s\n", entity.getScore());
        out.format("Normalized Vertices:\n");
        entity
            .getBoundingPoly()
            .getNormalizedVerticesList()
            .forEach(vertex -> out.format("- (%s, %s)\n", vertex.getX(), vertex.getY()));
      }
    }
  }
}

Node.js

Before trying this sample, follow the Node.js setup instructions in the Vision API Quickstart Using Client Libraries. For more information, see the Vision API Node.js API reference documentation.

// Imports the Google Cloud client libraries
const vision = require('@google-cloud/vision');
const fs = require('fs');

// Creates a client
const client = new vision.ImageAnnotatorClient();

/**
 * TODO(developer): Uncomment the following line before running the sample.
 */
// const fileName = `/path/to/localImage.png`;
const request = {
  image: {content: fs.readFileSync(fileName)},
};

client
  .objectLocalization(request)
  .then(results => {
    const objects = results[0].localizedObjectAnnotations;
    objects.forEach(object => {
      console.log(`Name: ${object.name}`);
      console.log(`Confidence: ${object.score}`);
      const vertices = object.boundingPoly.normalizedVertices;
      vertices.forEach(v => console.log(`x: ${v.x}, y: ${v.y}`));
    });
  })
  .catch(err => {
    console.error('ERROR:', err);
  });

PHP

Before trying this sample, follow the PHP setup instructions in the Vision API Quickstart Using Client Libraries. For more information, see the Vision API PHP API reference documentation.

namespace Google\Cloud\Samples\Vision;

use Google\Cloud\Vision\V1\ImageAnnotatorClient;

// $path = 'path/to/your/image.jpg'

function detect_object($path)
{
    $imageAnnotator = new ImageAnnotatorClient();

    # annotate the image
    $image = file_get_contents($path);
    $response = $imageAnnotator->objectLocalization($image);
    $objects = $response->getLocalizedObjectAnnotations();

    foreach ($objects as $object) {
        $name = $object->getName();
        $score = $object->getScore();
        $vertices = $object->getBoundingPoly()->getNormalizedVertices();

        printf('%s (confidence: %f)' . PHP_EOL, $name, $score);
        print('normalized bounding polygon vertices: ');
        foreach ($vertices as $vertex) {
            printf(' (%f, %f)', $vertex->getX(), $vertex->getY());
        }
        print(PHP_EOL);
    }

    $imageAnnotator->close();
}

Python

Before you can run the code in this example, you must install the Vision API client library.

The following example shows how to use the client library to send an object localization request for a local image file.

def localize_objects(path):
    """Localize objects in the local image.

    Args:
    path: The path to the local file.
    """
    from google.cloud import vision
    client = vision.ImageAnnotatorClient()

    with open(path, 'rb') as image_file:
        content = image_file.read()
    image = vision.types.Image(content=content)

    objects = client.object_localization(
        image=image).localized_object_annotations

    print('Number of objects found: {}'.format(len(objects)))
    for object_ in objects:
        print('\n{} (confidence: {})'.format(object_.name, object_.score))
        print('Normalized bounding polygon vertices: ')
        for vertex in object_.bounding_poly.normalized_vertices:
            print(' - ({}, {})'.format(vertex.x, vertex.y))
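
For example, you might then call the function with the path to a local image file (the path below is a placeholder):

localize_objects('./resources/image.jpg')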

Ruby

Before trying this sample, follow the Ruby setup instructions in the Vision API Quickstart Using Client Libraries. For more information, see the Vision API Ruby API reference documentation.

# image_path = "Path to local image file, e.g. './image.png'"

require "google/cloud/vision"

vision = Google::Cloud::Vision.new
image  = vision.image image_path

image.object_localizations.each do |object|
  puts "#{object.name} (confidence: #{object.score})"
  puts "Normalized bounding polygon vertices:"
  object.bounds.each do |vertex|
    puts " - (#{vertex.x}, #{vertex.y})"
  end
end

Localizing objects in a remote image

For your convenience, the Vision API can perform object localization directly on an image file located in Google Cloud Storage or on the web, without the need to send the contents of the image file in the body of your request.

Protocol

Refer to the images:annotate API endpoint for complete details.

To perform object localization, make a POST request and provide the appropriate request body:

POST https://vision.googleapis.com/v1/images:annotate?key=YOUR_API_KEY
{
  "requests": [
    {
      "image": {
        "source": {
          "imageUri": "gs://YOUR_BUCKET_NAME/YOUR_FILE_NAME"
        }
      },
      "features": [
        {
          "type": "OBJECT_LOCALIZATION"
        }
      ]
    }
  ]
}
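
For example, you can send this request with curl, assuming the request body above is saved as request.json and your API key is exported in the API_KEY environment variable:

curl -X POST \
    -H "Content-Type: application/json" \
    --data-binary @request.json \
    "https://vision.googleapis.com/v1/images:annotate?key=${API_KEY}"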

See the AnnotateImageRequest reference documentation for more information on configuring the request body.

C#

Before trying this sample, follow the C# setup instructions in the Vision API Quickstart Using Client Libraries. For more information, see the Vision API C# API reference documentation.

            // Reference the remote image by URI (gcsUri is a placeholder for a gs:// or web URI).
            var image = Image.FromUri(gcsUri);
            var client = ImageAnnotatorClient.Create();
            var response = client.DetectLocalizedObjects(image);

            Console.WriteLine($"Number of objects found: {response.Count}");
            foreach (var localizedObject in response)
            {
                Console.Write($"\n{localizedObject.Name}");
                Console.WriteLine($" (confidence: {localizedObject.Score})");
                Console.WriteLine("Normalized bounding polygon vertices: ");

                foreach (var vertex
                        in localizedObject.BoundingPoly.NormalizedVertices)
                {
                    Console.WriteLine($" - ({vertex.X}, {vertex.Y})");
                }
            }

Go

Before trying this sample, follow the Go setup instructions in the Vision API Quickstart Using Client Libraries. For more information, see the Vision API Go API reference documentation.

// localizeObjectsURI gets objects and bounding boxes from the Vision API for an image at the given file URI.
func localizeObjectsURI(w io.Writer, file string) error {
	ctx := context.Background()

	client, err := vision.NewImageAnnotatorClient(ctx)
	if err != nil {
		return err
	}

	image := vision.NewImageFromURI(file)
	annotations, err := client.LocalizeObjects(ctx, image, nil)
	if err != nil {
		return err
	}

	if len(annotations) == 0 {
		fmt.Fprintln(w, "No objects found.")
		return nil
	}

	fmt.Fprintln(w, "Objects:")
	for _, annotation := range annotations {
		fmt.Fprintln(w, annotation.Name)
		fmt.Fprintln(w, annotation.Score)

		for _, v := range annotation.BoundingPoly.NormalizedVertices {
			fmt.Fprintf(w, "(%f,%f)\n", v.X, v.Y)
		}
	}

	return nil
}

Java

Before trying this sample, follow the Java setup instructions in the Vision API Quickstart Using Client Libraries. For more information, see the Vision API Java API reference documentation.

/**
 * Detects localized objects in a remote image on Google Cloud Storage.
 *
 * @param gcsPath The path to the remote file on Google Cloud Storage to detect localized objects
 *     on.
 * @param out A {@link PrintStream} to write detected objects to.
 * @throws Exception on errors while closing the client.
 * @throws IOException on Input/Output errors.
 */
public static void detectLocalizedObjectsGcs(String gcsPath, PrintStream out)
    throws Exception, IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();

  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder()
          .addFeatures(Feature.newBuilder().setType(Type.OBJECT_LOCALIZATION))
          .setImage(img)
          .build();
  requests.add(request);

  // Perform the request
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    // Display the results
    for (AnnotateImageResponse res : responses) {
      for (LocalizedObjectAnnotation entity : res.getLocalizedObjectAnnotationsList()) {
        out.format("Object name: %s\n", entity.getName());
        out.format("Confidence: %s\n", entity.getScore());
        out.format("Normalized Vertices:\n");
        entity
            .getBoundingPoly()
            .getNormalizedVerticesList()
            .forEach(vertex -> out.format("- (%s, %s)\n", vertex.getX(), vertex.getY()));
      }
    }
  }
}

Node.js

Before trying this sample, follow the Node.js setup instructions in the Vision API Quickstart Using Client Libraries. For more information, see the Vision API Node.js API reference documentation.

// Imports the Google Cloud client libraries
const vision = require('@google-cloud/vision');

// Creates a client
const client = new vision.ImageAnnotatorClient();

/**
 * TODO(developer): Uncomment the following line before running the sample.
 */
// const gcsUri = `gs://bucket/bucketImage.png`;

client
  .objectLocalization(gcsUri)
  .then(results => {
    const objects = results[0].localizedObjectAnnotations;
    objects.forEach(object => {
      console.log(`Name: ${object.name}`);
      console.log(`Confidence: ${object.score}`);
      const vertices = object.boundingPoly.normalizedVertices;
      vertices.forEach(v => console.log(`x: ${v.x}, y: ${v.y}`));
    });
  })
  .catch(err => {
    console.error('ERROR:', err);
  });

PHP

Before trying this sample, follow the PHP setup instructions in the Vision API Quickstart Using Client Libraries. For more information, see the Vision API PHP API reference documentation.

namespace Google\Cloud\Samples\Vision;

use Google\Cloud\Vision\V1\ImageAnnotatorClient;

// $path = 'gs://path/to/your/image.jpg'

function detect_object_gcs($path)
{
    $imageAnnotator = new ImageAnnotatorClient();

    # annotate the image
    $response = $imageAnnotator->objectLocalization($path);
    $objects = $response->getLocalizedObjectAnnotations();

    foreach ($objects as $object) {
        $name = $object->getName();
        $score = $object->getScore();
        $vertices = $object->getBoundingPoly()->getNormalizedVertices();

        printf('%s (confidence: %f)' . PHP_EOL, $name, $score);
        print('normalized bounding polygon vertices: ');
        foreach ($vertices as $vertex) {
            printf(' (%f, %f)', $vertex->getX(), $vertex->getY());
        }
        print(PHP_EOL);
    }

    $imageAnnotator->close();
}

Python

Before you can run the code in this example, you must install the Vision API client library.

The following example shows how to use the client library to send an object localization request for a remote image file.

def localize_objects_uri(uri):
    """Localize objects in the image on Google Cloud Storage

    Args:
    uri: The path to the file in Google Cloud Storage (gs://...)
    """
    from google.cloud import vision
    client = vision.ImageAnnotatorClient()

    image = vision.types.Image()
    image.source.image_uri = uri

    objects = client.object_localization(
        image=image).localized_object_annotations

    print('Number of objects found: {}'.format(len(objects)))
    for object_ in objects:
        print('\n{} (confidence: {})'.format(object_.name, object_.score))
        print('Normalized bounding polygon vertices: ')
        for vertex in object_.bounding_poly.normalized_vertices:
            print(' - ({}, {})'.format(vertex.x, vertex.y))
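
For example, you might then call the function with a Cloud Storage URI (the URI below is a placeholder):

localize_objects_uri('gs://my-bucket/image.jpg')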

Ruby

Before trying this sample, follow the Ruby setup instructions in the Vision API Quickstart Using Client Libraries. For more information, see the Vision API Ruby API reference documentation.

# image_path = "Google Cloud Storage URI, e.g. 'gs://my-bucket/image.png'"

require "google/cloud/vision"

vision = Google::Cloud::Vision.new
image  = vision.image image_path

image.object_localizations.each do |object|
  puts "#{object.name} (confidence: #{object.score})"
  puts "Normalized bounding polygon vertices:"
  object.bounds.each do |vertex|
    puts " - (#{vertex.x}, #{vertex.y})"
  end
end

OBJECT_LOCALIZATION Response

The JSON response from an Object Localization request is in the following format:

{
  "responses": [
    {
      "localizedObjectAnnotations": [
        {
          "mid": "/m/01bqk0",
          "name": "Bicycle wheel",
          "score": 0.89648587,
          "boundingPoly": {
            "normalizedVertices": [
              {
                "x": 0.32076266,
                "y": 0.78941387
              },
              {
                "x": 0.43812272,
                "y": 0.78941387
              },
              {
                "x": 0.43812272,
                "y": 0.97331065
              },
              {
                "x": 0.32076266,
                "y": 0.97331065
              }
            ]
          }
        },
        {
          "mid": "/m/0199g",
          "name": "Bicycle",
          "score": 0.886761,
          "boundingPoly": {
            "normalizedVertices": [
              {
                "x": 0.312,
                "y": 0.6616471
              },
              {
                "x": 0.638353,
                "y": 0.6616471
              },
              {
                "x": 0.638353,
                "y": 0.9705882
              },
              {
                "x": 0.312,
                "y": 0.9705882
              }
            ]
          }
        },
        {
          "mid": "/m/01bqk0",
          "name": "Bicycle wheel",
          "score": 0.6345275,
          "boundingPoly": {
            "normalizedVertices": [
              {
                "x": 0.5125398,
                "y": 0.760708
              },
              {
                "x": 0.6256646,
                "y": 0.760708
              },
              {
                "x": 0.6256646,
                "y": 0.94601655
              },
              {
                "x": 0.5125398,
                "y": 0.94601655
              }
            ]
          }
        },
        {
          "mid": "/m/06z37_",
          "name": "Picture frame",
          "score": 0.6207608,
          "boundingPoly": {
            "normalizedVertices": [
              {
                "x": 0.79177403,
                "y": 0.16160682
              },
              {
                "x": 0.97047985,
                "y": 0.16160682
              },
              {
                "x": 0.97047985,
                "y": 0.31348917
              },
              {
                "x": 0.79177403,
                "y": 0.31348917
              }
            ]
          }
        },
        {
          "mid": "/m/0h9mv",
          "name": "Tire",
          "score": 0.55886006,
          "boundingPoly": {
            "normalizedVertices": [
              {
                "x": 0.32076266,
                "y": 0.78941387
              },
              {
                "x": 0.43812272,
                "y": 0.78941387
              },
              {
                "x": 0.43812272,
                "y": 0.97331065
              },
              {
                "x": 0.32076266,
                "y": 0.97331065
              }
            ]
          }
        },
        {
          "mid": "/m/02dgv",
          "name": "Door",
          "score": 0.5160098,
          "boundingPoly": {
            "normalizedVertices": [
              {
                "x": 0.77569866,
                "y": 0.37104446
              },
              {
                "x": 0.9412425,
                "y": 0.37104446
              },
              {
                "x": 0.9412425,
                "y": 0.81507325
              },
              {
                "x": 0.77569866,
                "y": 0.81507325
              }
            ]
          }
        }
      ]
    }
  ]
}
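
The x and y values of each normalized vertex are fractions of the image width and height, in the range [0, 1]. The following is a minimal Python sketch for converting them to pixel coordinates (the function name and example dimensions are illustrative):

def to_pixel_coords(normalized_vertices, width, height):
    """Scale normalized vertices (fractions in [0, 1]) to pixel coordinates."""
    return [(v['x'] * width, v['y'] * height) for v in normalized_vertices]

# For the first "Bicycle wheel" vertex above, on a hypothetical 1000x800 image:
# to_pixel_coords([{'x': 0.32076266, 'y': 0.78941387}], 1000, 800)
# returns [(320.76266, 631.531096)]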