Detecting Faces

Face Detection detects multiple faces within an image, along with the associated key facial attributes such as emotional state or the presence of headwear. Facial Recognition is not supported.

Detecting Faces in a local image

Protocol

Refer to the images:annotate API endpoint for complete details.

To perform Face Detection, make a POST request and provide the appropriate request body:

POST https://vision.googleapis.com/v1/images:annotate?key=YOUR_API_KEY
{
  "requests": [
    {
      "images": {
        "content": "/9j/7QBEUGhvdG9zaG9...base64-encoded-image-content...fXNWzvDEeYxxxzj/Coa6Bax//Z"
      },
      "features": [
        {
          "type": "FACE_DETECTION"
        }
      ]
    }
  ]
}

See the AnnotateImageRequest reference documentation for more information on configuring the request body.
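
For reference, the request above can be sent with any HTTP client. The following is a minimal Python sketch, assuming the third-party requests library is installed; YOUR_API_KEY and the image path are placeholders to replace with your own values:

import base64
import json

import requests  # third-party HTTP client, assumed to be installed

API_KEY = 'YOUR_API_KEY'               # replace with your API key
IMAGE_PATH = 'path/to/your/image.jpg'  # replace with a local image file

# Base64-encode the local image and build the request body shown above.
with open(IMAGE_PATH, 'rb') as image_file:
    encoded_image = base64.b64encode(image_file.read()).decode('utf-8')

body = {
    'requests': [{
        'image': {'content': encoded_image},
        'features': [{'type': 'FACE_DETECTION'}]
    }]
}

# POST the request to the images:annotate endpoint.
response = requests.post(
    'https://vision.googleapis.com/v1/images:annotate',
    params={'key': API_KEY},
    json=body)
print(json.dumps(response.json(), indent=2))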

If the request is successful, the server returns a 200 OK HTTP status code and the response in JSON format:

{
  "responses":[
    {
      "faceAnnotations":[
        {
          "boundingPoly":{
            "vertices":[
              {
                "x":1916,
                "y":870
              },
              {
                "x":2106,
                "y":870
              },
              {
                "x":2106,
                "y":1091
              },
              {
                "x":1916,
                "y":1091
              }
            ]
          },
          "fdBoundingPoly":{
            "vertices":[
              {
                "x":1923,
                "y":910
              },
              {
                "x":2081,
                "y":910
              },
              {
                "x":2081,
                "y":1068
              },
              {
                "x":1923,
                "y":1068
              }
            ]
          },
          "landmarks":[
            {
              "type":"LEFT_EYE",
              "position":{
                "x":1969.4862,
                "y":955.17334,
                "z":-0.0016533856
              }
            },
            {
              "type":"RIGHT_EYE",
              "position":{
                "x":2019.262,
                "y":967.91278,
                "z":-28.925787
              }
            },
            {
              "type":"LEFT_OF_LEFT_EYEBROW",
              "position":{
                "x":1959.3801,
                "y":939.696,
                "z":14.981886
              }
            },
            {
              "type":"RIGHT_OF_LEFT_EYEBROW",
              "position":{
                "x":1980.2725,
                "y":943.3717,
                "z":-15.975462
              }
            },
            {
              "type":"LEFT_OF_RIGHT_EYEBROW",
              "position":{
                "x":2003.1469,
                "y":948.81323,
                "z":-29.651102
              }
            },
            {
              "type":"RIGHT_OF_RIGHT_EYEBROW",
              "position":{
                "x":2040.1477,
                "y":961.9339,
                "z":-32.134441
              }
            },
            {
              "type":"MIDPOINT_BETWEEN_EYES",
              "position":{
                "x":1987.4386,
                "y":956.79248,
                "z":-24.352777
              }
            },
            {
              "type":"NOSE_TIP",
              "position":{
                "x":1969.6227,
                "y":985.49719,
                "z":-41.193481
              }
            },
            {
              "type":"UPPER_LIP",
              "position":{
                "x":1972.1095,
                "y":1007.2608,
                "z":-30.672895
              }
            },
            {
              "type":"LOWER_LIP",
              "position":{
                "x":1968.515,
                "y":1027.6235,
                "z":-28.315508
              }
            },
            {
              "type":"MOUTH_LEFT",
              "position":{
                "x":1957.8792,
                "y":1013.6796,
                "z":-6.6342912
              }
            },
            {
              "type":"MOUTH_RIGHT",
              "position":{
                "x":1998.7747,
                "y":1022.9999,
                "z":-28.734522
              }
            },
            {
              "type":"MOUTH_CENTER",
              "position":{
                "x":1971.396,
                "y":1017.4032,
                "z":-27.534792
              }
            },
            {
              "type":"NOSE_BOTTOM_RIGHT",
              "position":{
                "x":1993.8416,
                "y":995.19,
                "z":-29.759504
              }
            },
            {
              "type":"NOSE_BOTTOM_LEFT",
              "position":{
                "x":1965.5908,
                "y":989.42383,
                "z":-13.663703
              }
            },
            {
              "type":"NOSE_BOTTOM_CENTER",
              "position":{
                "x":1974.8154,
                "y":995.68555,
                "z":-30.112482
              }
            },
            {
              "type":"LEFT_EYE_TOP_BOUNDARY",
              "position":{
                "x":1968.6737,
                "y":950.9704,
                "z":-3.0559144
              }
            },
            {
              "type":"LEFT_EYE_RIGHT_CORNER",
              "position":{
                "x":1978.8079,
                "y":958.23712,
                "z":-5.4053364
              }
            },
            {
              "type":"LEFT_EYE_BOTTOM_BOUNDARY",
              "position":{
                "x":1967.8793,
                "y":959.22345,
                "z":-0.62461489
              }
            },
            {
              "type":"LEFT_EYE_LEFT_CORNER",
              "position":{
                "x":1962.1622,
                "y":954.26093,
                "z":10.204804
              }
            },
            {
              "type":"LEFT_EYE_PUPIL",
              "position":{
                "x":1967.9233,
                "y":954.9704,
                "z":-0.77994776
              }
            },
            {
              "type":"RIGHT_EYE_TOP_BOUNDARY",
              "position":{
                "x":2016.6268,
                "y":962.88623,
                "z":-31.205936
              }
            },
            {
              "type":"RIGHT_EYE_RIGHT_CORNER",
              "position":{
                "x":2029.2314,
                "y":970.985,
                "z":-29.216293
              }
            },
            {
              "type":"RIGHT_EYE_BOTTOM_BOUNDARY",
              "position":{
                "x":2017.429,
                "y":972.17621,
                "z":-28.954475
              }
            },
            {
              "type":"RIGHT_EYE_LEFT_CORNER",
              "position":{
                "x":2007.4708,
                "y":965.36237,
                "z":-22.286636
              }
            },
            {
              "type":"RIGHT_EYE_PUPIL",
              "position":{
                "x":2017.0439,
                "y":967.18329,
                "z":-29.732374
              }
            },
            {
              "type":"LEFT_EYEBROW_UPPER_MIDPOINT",
              "position":{
                "x":1969.7963,
                "y":934.11523,
                "z":-3.3017645
              }
            },
            {
              "type":"RIGHT_EYEBROW_UPPER_MIDPOINT",
              "position":{
                "x":2021.7909,
                "y":947.04419,
                "z":-33.841984
              }
            },
            {
              "type":"LEFT_EAR_TRAGION",
              "position":{
                "x":1963.6063,
                "y":987.89252,
                "z":77.398705
              }
            },
            {
              "type":"RIGHT_EAR_TRAGION",
              "position":{
                "x":2075.2998,
                "y":1016.2071,
                "z":13.859237
              }
            },
            {
              "type":"FOREHEAD_GLABELLA",
              "position":{
                "x":1991.0243,
                "y":945.11224,
                "z":-24.655386
              }
            },
            {
              "type":"CHIN_GNATHION",
              "position":{
                "x":1964.3625,
                "y":1055.4045,
                "z":-23.147352
              }
            },
            {
              "type":"CHIN_LEFT_GONION",
              "position":{
                "x":1948.226,
                "y":1019.5986,
                "z":52.048538
              }
            },
            {
              "type":"CHIN_RIGHT_GONION",
              "position":{
                "x":2046.8456,
                "y":1044.8068,
                "z":-6.1001
              }
            }
          ],
          "rollAngle":16.066454,
          "panAngle":-29.752207,
          "tiltAngle":3.7352962,
          "detectionConfidence":0.98736823,
          "landmarkingConfidence":0.57041687,
          "joyLikelihood":0.90647823,
          "sorrowLikelihood":4.1928422e-05,
          "angerLikelihood":0.00033951481,
          "surpriseLikelihood":0.0024809798,
          "underExposedLikelihood":3.5745124e-06,
          "blurredLikelihood":0.00038755304,
          "headwearLikelihood":1.1718362e-05
        }
      ]
    }
  ]
}
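
Each element of faceAnnotations describes one detected face. As a rough sketch, the most commonly used fields of this response can be read as follows, assuming the JSON has been parsed into a Python dictionary (for example with json.loads):

# `response_json` is the parsed JSON dictionary returned by images:annotate.
for face in response_json['responses'][0].get('faceAnnotations', []):
    # Outer bounding box of the face; the API may omit x or y when the value is 0.
    box = [(vertex.get('x', 0), vertex.get('y', 0))
           for vertex in face['boundingPoly']['vertices']]
    print('bounding box:', box)

    # Overall confidence that a face was detected, and the joy likelihood.
    print('detection confidence:', face['detectionConfidence'])
    print('joy likelihood:', face['joyLikelihood'])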

C#

For more on installing and creating a Cloud Vision API client, refer to Cloud Vision API Client Libraries.

private static object DetectFaces(string filePath)
{
    var client = ImageAnnotatorClient.Create();
    var image = Image.FromFile(filePath);
    var response = client.DetectFaces(image);
    int count = 1;
    foreach (var faceAnnotation in response)
    {
        Console.WriteLine("Face {0}:", count++);
        Console.WriteLine("  Joy: {0}", faceAnnotation.JoyLikelihood);
        Console.WriteLine("  Anger: {0}", faceAnnotation.AngerLikelihood);
        Console.WriteLine("  Sorrow: {0}", faceAnnotation.SorrowLikelihood);
        Console.WriteLine("  Surprise: {0}", faceAnnotation.SurpriseLikelihood);
    }
    return 0;
}

Go

For more on installing and creating a Cloud Vision API client, refer to Cloud Vision API Client Libraries.

// detectFaces gets faces from the Vision API for an image at the given file path.
func detectFaces(w io.Writer, file string) error {
	ctx := context.Background()

	client, err := vision.NewClient(ctx)
	if err != nil {
		return err
	}

	f, err := os.Open(file)
	if err != nil {
		return err
	}
	defer f.Close()

	image, err := vision.NewImageFromReader(f)
	if err != nil {
		return err
	}
	annotations, err := client.DetectFaces(ctx, image, 10)
	if err != nil {
		return err
	}

	if len(annotations) == 0 {
		fmt.Fprintln(w, "No faces found.")
	} else {
		fmt.Fprintln(w, "Faces:")
		for i, annotation := range annotations {
			fmt.Fprintln(w, "  Face", i)
			fmt.Fprintln(w, "    Anger:", annotation.Likelihoods.Anger)
			fmt.Fprintln(w, "    Joy:", annotation.Likelihoods.Joy)
			fmt.Fprintln(w, "    Surprise:", annotation.Likelihoods.Surprise)
		}
	}

	return nil
}

Java

For more on installing and creating a Cloud Vision API client, refer to Cloud Vision API Client Libraries.

public static void detectFaces(String filePath, PrintStream out) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Type.FACE_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  BatchAnnotateImagesResponse response =
      ImageAnnotatorClient.create().batchAnnotateImages(requests);
  List<AnnotateImageResponse> responses = response.getResponsesList();

  for (AnnotateImageResponse res : responses) {
    if (res.hasError()) {
      out.printf("Error: %s\n", res.getError().getMessage());
      return;
    }

    // For full list of available annotations, see http://g.co/cloud/vision/docs
    for (FaceAnnotation annotation : res.getFaceAnnotationsList()) {
      out.printf(
          "anger: %s\njoy: %s\nsurprise: %s\nposition: %s",
          annotation.getAngerLikelihood(),
          annotation.getJoyLikelihood(),
          annotation.getSurpriseLikelihood(),
          annotation.getBoundingPoly());
    }
  }
}

Node.js

For more on installing and creating a Cloud Vision API client, refer to Cloud Vision API Client Libraries.

// Imports the Google Cloud client library
const Vision = require('@google-cloud/vision');

// Instantiates a client
const vision = Vision();

// The path to the local image file, e.g. "/path/to/image.png"
// const fileName = '/path/to/image.png';

// Performs face detection on the local file
vision.detectFaces(fileName)
  .then((results) => {
    const faces = results[0];

    console.log('Faces:');
    faces.forEach((face, i) => {
      console.log(`  Face #${i + 1}:`);
      console.log(`    Joy: ${face.joy}`);
      console.log(`    Anger: ${face.anger}`);
      console.log(`    Sorrow: ${face.sorrow}`);
      console.log(`    Surprise: ${face.surprise}`);
    });
  });

PHP

For more on installing and creating a Cloud Vision API client, refer to Cloud Vision API Client Libraries.

use Google\Cloud\Vision\VisionClient;

// $projectId = 'YOUR_PROJECT_ID';
// $path = 'path/to/your/image.jpg'

$vision = new VisionClient([
    'projectId' => $projectId,
]);
$image = $vision->image(file_get_contents($path), ['FACE_DETECTION']);
$result = $vision->annotate($image);
print("Faces:\n");
foreach ((array) $result->faces() as $face) {
    printf("Anger: %s\n", $face->isAngry() ? 'yes' : 'no');
    printf("Joy: %s\n", $face->isJoyful() ? 'yes' : 'no');
    printf("Surprise: %s\n\n", $face->isSurprised() ? 'yes' : 'no');
}

Python

For more on installing and creating a Cloud Vision API client, refer to Cloud Vision API Client Libraries.

import io

from google.cloud import vision


def detect_faces(path):
    """Detects faces in an image."""
    vision_client = vision.Client()

    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = vision_client.image(content=content)

    faces = image.detect_faces()
    print('Faces:')

    for face in faces:
        print('anger: {}'.format(face.emotions.anger))
        print('joy: {}'.format(face.emotions.joy))
        print('surprise: {}'.format(face.emotions.surprise))

        vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
                    for bound in face.bounds.vertices])

        print('face bounds: {}'.format(','.join(vertices)))

Detecting Faces in a remote image

For your convenience, the Cloud Vision API can perform Face Detection directly on an image file located in Google Cloud Storage or on the Web without the need to send the contents of the image file in the body of your request.

Protocol

Refer to the images:annotate API endpoint for complete details.

To perform Face Detection, make a POST request and provide the appropriate request body:

POST https://vision.googleapis.com/v1/images:annotate?key=YOUR_API_KEY
{
  "requests": [
    {
      "images": {
        "source": {
          "gcsImageUri": "gs://YOUR_BUCKET_NAME/YOUR_FILE_NAME"
        }
      },
      "features": [
        {
          "type": "FACE_DETECTION"
        }
      ]
    }
  ]
}

See the AnnotateImageRequest reference documentation for more information on configuring the request body.
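
If the image is hosted on the Web rather than in Cloud Storage, current versions of the v1 API also accept an imageUri source that points at a publicly accessible URL. A minimal sketch of that variant of the request body, built in Python with a placeholder URL, which can be posted in the same way as the local-image example above:

body = {
    'requests': [{
        # imageUri may be a publicly accessible URL (or a gs:// URI).
        'image': {'source': {'imageUri': 'https://example.com/path/to/image.jpg'}},
        'features': [{'type': 'FACE_DETECTION'}]
    }]
}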

If the request is successful, the server returns a 200 OK HTTP status code and the response in JSON format:

{
  "responses":[
    {
      "faceAnnotations":[
        {
          "boundingPoly":{
            "vertices":[
              {
                "x":1916,
                "y":870
              },
              {
                "x":2106,
                "y":870
              },
              {
                "x":2106,
                "y":1091
              },
              {
                "x":1916,
                "y":1091
              }
            ]
          },
          "fdBoundingPoly":{
            "vertices":[
              {
                "x":1923,
                "y":910
              },
              {
                "x":2081,
                "y":910
              },
              {
                "x":2081,
                "y":1068
              },
              {
                "x":1923,
                "y":1068
              }
            ]
          },
          "landmarks":[
            {
              "type":"LEFT_EYE",
              "position":{
                "x":1969.4862,
                "y":955.17334,
                "z":-0.0016533856
              }
            },
            {
              "type":"RIGHT_EYE",
              "position":{
                "x":2019.262,
                "y":967.91278,
                "z":-28.925787
              }
            },
            {
              "type":"LEFT_OF_LEFT_EYEBROW",
              "position":{
                "x":1959.3801,
                "y":939.696,
                "z":14.981886
              }
            },
            {
              "type":"RIGHT_OF_LEFT_EYEBROW",
              "position":{
                "x":1980.2725,
                "y":943.3717,
                "z":-15.975462
              }
            },
            {
              "type":"LEFT_OF_RIGHT_EYEBROW",
              "position":{
                "x":2003.1469,
                "y":948.81323,
                "z":-29.651102
              }
            },
            {
              "type":"RIGHT_OF_RIGHT_EYEBROW",
              "position":{
                "x":2040.1477,
                "y":961.9339,
                "z":-32.134441
              }
            },
            {
              "type":"MIDPOINT_BETWEEN_EYES",
              "position":{
                "x":1987.4386,
                "y":956.79248,
                "z":-24.352777
              }
            },
            {
              "type":"NOSE_TIP",
              "position":{
                "x":1969.6227,
                "y":985.49719,
                "z":-41.193481
              }
            },
            {
              "type":"UPPER_LIP",
              "position":{
                "x":1972.1095,
                "y":1007.2608,
                "z":-30.672895
              }
            },
            {
              "type":"LOWER_LIP",
              "position":{
                "x":1968.515,
                "y":1027.6235,
                "z":-28.315508
              }
            },
            {
              "type":"MOUTH_LEFT",
              "position":{
                "x":1957.8792,
                "y":1013.6796,
                "z":-6.6342912
              }
            },
            {
              "type":"MOUTH_RIGHT",
              "position":{
                "x":1998.7747,
                "y":1022.9999,
                "z":-28.734522
              }
            },
            {
              "type":"MOUTH_CENTER",
              "position":{
                "x":1971.396,
                "y":1017.4032,
                "z":-27.534792
              }
            },
            {
              "type":"NOSE_BOTTOM_RIGHT",
              "position":{
                "x":1993.8416,
                "y":995.19,
                "z":-29.759504
              }
            },
            {
              "type":"NOSE_BOTTOM_LEFT",
              "position":{
                "x":1965.5908,
                "y":989.42383,
                "z":-13.663703
              }
            },
            {
              "type":"NOSE_BOTTOM_CENTER",
              "position":{
                "x":1974.8154,
                "y":995.68555,
                "z":-30.112482
              }
            },
            {
              "type":"LEFT_EYE_TOP_BOUNDARY",
              "position":{
                "x":1968.6737,
                "y":950.9704,
                "z":-3.0559144
              }
            },
            {
              "type":"LEFT_EYE_RIGHT_CORNER",
              "position":{
                "x":1978.8079,
                "y":958.23712,
                "z":-5.4053364
              }
            },
            {
              "type":"LEFT_EYE_BOTTOM_BOUNDARY",
              "position":{
                "x":1967.8793,
                "y":959.22345,
                "z":-0.62461489
              }
            },
            {
              "type":"LEFT_EYE_LEFT_CORNER",
              "position":{
                "x":1962.1622,
                "y":954.26093,
                "z":10.204804
              }
            },
            {
              "type":"LEFT_EYE_PUPIL",
              "position":{
                "x":1967.9233,
                "y":954.9704,
                "z":-0.77994776
              }
            },
            {
              "type":"RIGHT_EYE_TOP_BOUNDARY",
              "position":{
                "x":2016.6268,
                "y":962.88623,
                "z":-31.205936
              }
            },
            {
              "type":"RIGHT_EYE_RIGHT_CORNER",
              "position":{
                "x":2029.2314,
                "y":970.985,
                "z":-29.216293
              }
            },
            {
              "type":"RIGHT_EYE_BOTTOM_BOUNDARY",
              "position":{
                "x":2017.429,
                "y":972.17621,
                "z":-28.954475
              }
            },
            {
              "type":"RIGHT_EYE_LEFT_CORNER",
              "position":{
                "x":2007.4708,
                "y":965.36237,
                "z":-22.286636
              }
            },
            {
              "type":"RIGHT_EYE_PUPIL",
              "position":{
                "x":2017.0439,
                "y":967.18329,
                "z":-29.732374
              }
            },
            {
              "type":"LEFT_EYEBROW_UPPER_MIDPOINT",
              "position":{
                "x":1969.7963,
                "y":934.11523,
                "z":-3.3017645
              }
            },
            {
              "type":"RIGHT_EYEBROW_UPPER_MIDPOINT",
              "position":{
                "x":2021.7909,
                "y":947.04419,
                "z":-33.841984
              }
            },
            {
              "type":"LEFT_EAR_TRAGION",
              "position":{
                "x":1963.6063,
                "y":987.89252,
                "z":77.398705
              }
            },
            {
              "type":"RIGHT_EAR_TRAGION",
              "position":{
                "x":2075.2998,
                "y":1016.2071,
                "z":13.859237
              }
            },
            {
              "type":"FOREHEAD_GLABELLA",
              "position":{
                "x":1991.0243,
                "y":945.11224,
                "z":-24.655386
              }
            },
            {
              "type":"CHIN_GNATHION",
              "position":{
                "x":1964.3625,
                "y":1055.4045,
                "z":-23.147352
              }
            },
            {
              "type":"CHIN_LEFT_GONION",
              "position":{
                "x":1948.226,
                "y":1019.5986,
                "z":52.048538
              }
            },
            {
              "type":"CHIN_RIGHT_GONION",
              "position":{
                "x":2046.8456,
                "y":1044.8068,
                "z":-6.1001
              }
            }
          ],
          "rollAngle":16.066454,
          "panAngle":-29.752207,
          "tiltAngle":3.7352962,
          "detectionConfidence":0.98736823,
          "landmarkingConfidence":0.57041687,
          "joyLikelihood":0.90647823,
          "sorrowLikelihood":4.1928422e-05,
          "angerLikelihood":0.00033951481,
          "surpriseLikelihood":0.0024809798,
          "underExposedLikelihood":3.5745124e-06,
          "blurredLikelihood":0.00038755304,
          "headwearLikelihood":1.1718362e-05
        }
      ]
    }
  ]
}

C#

For more on installing and creating a Cloud Vision API client, refer to Cloud Vision API Client Libraries.

private static object DetectFaces(string bucketName, string objectName)
{
    var client = ImageAnnotatorClient.Create();
    var image = Image.FromUri($"gs://{bucketName}/{objectName}");
    var response = client.DetectFaces(image);
    int count = 1;
    foreach (var faceAnnotation in response)
    {
        Console.WriteLine("Face {0}:", count++);
        Console.WriteLine("  Joy: {0}", faceAnnotation.JoyLikelihood);
        Console.WriteLine("  Anger: {0}", faceAnnotation.AngerLikelihood);
        Console.WriteLine("  Sorrow: {0}", faceAnnotation.SorrowLikelihood);
        Console.WriteLine("  Surprise: {0}", faceAnnotation.SurpriseLikelihood);
    }
    return 0;
}

Go

For more on installing and creating a Cloud Vision API client, refer to Cloud Vision API Client Libraries.

// detectFacesURI gets faces from the Vision API for an image located at the given Cloud Storage URI.
func detectFacesURI(w io.Writer, file string) error {
	ctx := context.Background()

	client, err := vision.NewClient(ctx)
	if err != nil {
		return err
	}

	image := vision.NewImageFromURI(file)
	annotations, err := client.DetectFaces(ctx, image, 10)
	if err != nil {
		return err
	}

	if len(annotations) == 0 {
		fmt.Fprintln(w, "No faces found.")
	} else {
		fmt.Fprintln(w, "Faces:")
		for i, annotation := range annotations {
			fmt.Fprintln(w, "  Face", i)
			fmt.Fprintln(w, "    Anger:", annotation.Likelihoods.Anger)
			fmt.Fprintln(w, "    Joy:", annotation.Likelihoods.Joy)
			fmt.Fprintln(w, "    Surprise:", annotation.Likelihoods.Surprise)
		}
	}

	return nil
}

Java

For more on installing and creating a Cloud Vision API client, refer to Cloud Vision API Client Libraries.

public static void detectFacesGcs(String gcsPath, PrintStream out) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ImageAnnotatorSettings.Builder imageAnnotatorSettingsBuilder =
      ImageAnnotatorSettings.defaultBuilder();
  imageAnnotatorSettingsBuilder
      .batchAnnotateImagesSettings()
      .getRetrySettingsBuilder()
      .setTotalTimeout(Duration.standardSeconds(30));
  ImageAnnotatorSettings settings = imageAnnotatorSettingsBuilder.build();

  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feat = Feature.newBuilder().setType(Type.FACE_DETECTION).build();

  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  ImageAnnotatorClient client = ImageAnnotatorClient.create(settings);
  BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
  List<AnnotateImageResponse> responses = response.getResponsesList();

  for (AnnotateImageResponse res : responses) {
    if (res.hasError()) {
      out.printf("Error: %s\n", res.getError().getMessage());
      return;
    }

    // For full list of available annotations, see http://g.co/cloud/vision/docs
    for (FaceAnnotation annotation : res.getFaceAnnotationsList()) {
      out.printf(
          "anger: %s\njoy: %s\nsurprise: %s\nposition: %s",
          annotation.getAngerLikelihood(),
          annotation.getJoyLikelihood(),
          annotation.getSurpriseLikelihood(),
          annotation.getBoundingPoly());
    }
  }
}

Node.js

For more on installing and creating a Cloud Vision API client, refer to Cloud Vision API Client Libraries.

// Imports the Google Cloud client libraries
const Storage = require('@google-cloud/storage');
const Vision = require('@google-cloud/vision');

// Instantiates clients
const storage = Storage();
const vision = Vision();

// The name of the bucket where the file resides, e.g. "my-bucket"
// const bucketName = 'my-bucket';

// The path to the file within the bucket, e.g. "path/to/image.png"
// const fileName = 'path/to/image.png';

// Performs face detection on the remote file
vision.detectFaces(storage.bucket(bucketName).file(fileName))
  .then((results) => {
    const faces = results[0];

    console.log('Faces:');
    faces.forEach((face, i) => {
      console.log(`  Face #${i + 1}:`);
      console.log(`    Joy: ${face.joy}`);
      console.log(`    Anger: ${face.anger}`);
      console.log(`    Sorrow: ${face.sorrow}`);
      console.log(`    Surprise: ${face.surprise}`);
    });
  });

PHP

For more on installing and creating a Cloud Vision API client, refer to Cloud Vision API Client Libraries.

use Google\Cloud\ServiceBuilder;

// $projectId = 'YOUR_PROJECT_ID';
// $bucketName = 'your-bucket-name'
// $objectName = 'your-object-name'

$builder = new ServiceBuilder([
    'projectId' => $projectId,
]);
$vision = $builder->vision();
$storage = $builder->storage();

// fetch the storage object and annotate the image
$object = $storage->bucket($bucketName)->object($objectName);
$image = $vision->image($object, ['FACE_DETECTION']);
$result = $vision->annotate($image);

// print the response
print("Faces:\n");
foreach ((array) $result->faces() as $face) {
    printf("Anger: %s\n", $face->isAngry() ? 'yes' : 'no');
    printf("Joy: %s\n", $face->isJoyful() ? 'yes' : 'no');
    printf("Surprise: %s\n\n", $face->isSurprised() ? 'yes' : 'no');
}

Python

For more on installing and creating a Cloud Vision API client, refer to Cloud Vision API Client Libraries.

from google.cloud import vision


def detect_faces_uri(uri):
    """Detects faces in the file located in Google Cloud Storage or the web."""
    vision_client = vision.Client()
    image = vision_client.image(source_uri=uri)

    faces = image.detect_faces()
    print('Faces:')

    for face in faces:
        print('anger: {}'.format(face.emotions.anger))
        print('joy: {}'.format(face.emotions.joy))
        print('surprise: {}'.format(face.emotions.surprise))

        vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
                    for bound in face.bounds.vertices])

        print('face bounds: {}'.format(','.join(vertices)))
