Detecting faces

Face detection detects multiple faces within an image, along with the associated key facial attributes such as emotional state or wearing headwear. Note that facial recognition, which identifies specific individuals, is not supported.

Detecting faces in a local image

Protocol

For details, see the images:annotate API endpoint.

To perform face detection, make a POST request and provide the appropriate request body:

POST https://vision.googleapis.com/v1/images:annotate?key=YOUR_API_KEY
{
  "requests": [
    {
      "image": {
        "content": "/9j/7QBEUGhvdG9zaG9...base64-encoded-image-content...fXNWzvDEeYxxxzj/Coa6Bax//Z"
      },
      "features": [
        {
          "type": "FACE_DETECTION"
        }
      ]
    }
  ]
}

See the AnnotateImageRequest reference documentation for details on configuring the request body.
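
As an illustration only, here is a minimal sketch of issuing this request from Python with the third-party requests library; the API key and local image path are placeholders, and the client-library samples below are the supported route.

import base64
import json

import requests  # third-party HTTP library, assumed to be installed

# Placeholders: substitute your own API key and a local image path.
API_KEY = 'YOUR_API_KEY'
IMAGE_PATH = 'path/to/image.jpg'

# Base64-encode the local image so it can be inlined in the JSON body.
with open(IMAGE_PATH, 'rb') as f:
    encoded_image = base64.b64encode(f.read()).decode('utf-8')

body = {
    'requests': [{
        'image': {'content': encoded_image},
        'features': [{'type': 'FACE_DETECTION'}],
    }]
}

response = requests.post(
    'https://vision.googleapis.com/v1/images:annotate',
    params={'key': API_KEY},
    json=body)
print(json.dumps(response.json(), indent=2))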

C#

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

// Load an image from a local file.
var image = Image.FromFile(filePath);
var client = ImageAnnotatorClient.Create();
var response = client.DetectFaces(image);
int count = 1;
foreach (var faceAnnotation in response)
{
    Console.WriteLine("Face {0}:", count++);
    Console.WriteLine("  Joy: {0}", faceAnnotation.JoyLikelihood);
    Console.WriteLine("  Anger: {0}", faceAnnotation.AngerLikelihood);
    Console.WriteLine("  Sorrow: {0}", faceAnnotation.SorrowLikelihood);
    Console.WriteLine("  Surprise: {0}", faceAnnotation.SurpriseLikelihood);
}

Go

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

// detectFaces gets faces from the Vision API for an image at the given file path.
func detectFaces(w io.Writer, file string) error {
	ctx := context.Background()

	client, err := vision.NewImageAnnotatorClient(ctx)
	if err != nil {
		return err
	}

	f, err := os.Open(file)
	if err != nil {
		return err
	}
	defer f.Close()

	image, err := vision.NewImageFromReader(f)
	if err != nil {
		return err
	}
	annotations, err := client.DetectFaces(ctx, image, nil, 10)
	if err != nil {
		return err
	}
	if len(annotations) == 0 {
		fmt.Fprintln(w, "No faces found.")
	} else {
		fmt.Fprintln(w, "Faces:")
		for i, annotation := range annotations {
			fmt.Fprintln(w, "  Face", i)
			fmt.Fprintln(w, "    Anger:", annotation.AngerLikelihood)
			fmt.Fprintln(w, "    Joy:", annotation.JoyLikelihood)
			fmt.Fprintln(w, "    Surprise:", annotation.SurpriseLikelihood)
		}
	}
	return nil
}

Java

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

public static void detectFaces(String filePath, PrintStream out) throws Exception, IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Type.FACE_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      for (FaceAnnotation annotation : res.getFaceAnnotationsList()) {
        out.printf(
            "anger: %s\njoy: %s\nsurprise: %s\nposition: %s",
            annotation.getAngerLikelihood(),
            annotation.getJoyLikelihood(),
            annotation.getSurpriseLikelihood(),
            annotation.getBoundingPoly());
      }
    }
  }
}

Node.js

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

// Imports the Google Cloud client library
const vision = require('@google-cloud/vision');

// Creates a client
const client = new vision.ImageAnnotatorClient();

/**
 * TODO(developer): Uncomment the following line before running the sample.
 */
// const fileName = 'Local image file, e.g. /path/to/image.png';

client
  .faceDetection(fileName)
  .then(results => {
    const faces = results[0].faceAnnotations;

    console.log('Faces:');
    faces.forEach((face, i) => {
      console.log(`  Face #${i + 1}:`);
      console.log(`    Joy: ${face.joyLikelihood}`);
      console.log(`    Anger: ${face.angerLikelihood}`);
      console.log(`    Sorrow: ${face.sorrowLikelihood}`);
      console.log(`    Surprise: ${face.surpriseLikelihood}`);
    });
  })
  .catch(err => {
    console.error('ERROR:', err);
  });

PHP

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

namespace Google\Cloud\Samples\Vision;

use Google\Cloud\Vision\V1\ImageAnnotatorClient;

// $path = 'path/to/your/image.jpg'

function detect_face($path, $outFile = null)
{
    $imageAnnotator = new ImageAnnotatorClient();

    # annotate the image
    $image = file_get_contents($path);
    $response = $imageAnnotator->faceDetection($image);
    $faces = $response->getFaceAnnotations();

    # names of likelihood from google.cloud.vision.enums
    $likelihoodName = ['UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY',
        'POSSIBLE', 'LIKELY', 'VERY_LIKELY'];

    printf("%d faces found:" . PHP_EOL, count($faces));
    foreach ($faces as $face) {
        $anger = $face->getAngerLikelihood();
        printf("Anger: %s" . PHP_EOL, $likelihoodName[$anger]);

        $joy = $face->getJoyLikelihood();
        printf("Joy: %s" . PHP_EOL, $likelihoodName[$joy]);

        $surprise = $face->getSurpriseLikelihood();
        printf("Surprise: %s" . PHP_EOL, $likelihoodName[$surprise]);

        # get bounds
        $vertices = $face->getBoundingPoly()->getVertices();
        $bounds = [];
        foreach ($vertices as $vertex) {
            $bounds[] = sprintf('(%d,%d)', $vertex->getX(), $vertex->getY());
        }
        print('Bounds: ' . join(', ', $bounds) . PHP_EOL);
        print(PHP_EOL);
    }

    # draw box around faces
    if ($faces && $outFile) {
        $imageCreateFunc = [
            'png' => 'imagecreatefrompng',
            'gd' => 'imagecreatefromgd',
            'gif' => 'imagecreatefromgif',
            'jpg' => 'imagecreatefromjpeg',
            'jpeg' => 'imagecreatefromjpeg',
        ];
        $imageWriteFunc = [
            'png' => 'imagepng',
            'gd' => 'imagegd',
            'gif' => 'imagegif',
            'jpg' => 'imagejpeg',
            'jpeg' => 'imagejpeg',
        ];

        copy($path, $outFile);
        $ext = strtolower(pathinfo($path, PATHINFO_EXTENSION));
        if (!array_key_exists($ext, $imageCreateFunc)) {
            throw new \Exception('Unsupported image extension');
        }
        $outputImage = call_user_func($imageCreateFunc[$ext], $outFile);
        foreach ($faces as $face) {
            $vertices = $face->getBoundingPoly()->getVertices();
            if ($vertices) {
                $x1 = $vertices[0]->getX();
                $y1 = $vertices[0]->getY();
                $x2 = $vertices[2]->getX();
                $y2 = $vertices[2]->getY();
                imagerectangle($outputImage, $x1, $y1, $x2, $y2, 0x00ff00);
            }
        }
        call_user_func($imageWriteFunc[$ext], $outputImage, $outFile);
        printf('Output image written to %s' . PHP_EOL, $outFile);
    }
}

Python

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

import io

from google.cloud import vision


def detect_faces(path):
    """Detects faces in an image."""
    client = vision.ImageAnnotatorClient()

    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = vision.types.Image(content=content)

    response = client.face_detection(image=image)
    faces = response.face_annotations

    # Names of likelihood from google.cloud.vision.enums
    likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
                       'LIKELY', 'VERY_LIKELY')
    print('Faces:')

    for face in faces:
        print('anger: {}'.format(likelihood_name[face.anger_likelihood]))
        print('joy: {}'.format(likelihood_name[face.joy_likelihood]))
        print('surprise: {}'.format(likelihood_name[face.surprise_likelihood]))

        vertices = (['({},{})'.format(vertex.x, vertex.y)
                    for vertex in face.bounding_poly.vertices])

        print('face bounds: {}'.format(','.join(vertices)))
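
Assuming the function above is in scope, a hypothetical invocation looks like this (the path is a placeholder):

# Hypothetical usage; substitute a real local image path.
detect_faces('path/to/image.jpg')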

Ruby

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

# project_id = "Your Google Cloud project ID"
# image_path = "Path to local image file, e.g. './image.png'"

require "google/cloud/vision"

vision = Google::Cloud::Vision.new project: project_id
image  = vision.image image_path

image.faces.each do |face|
  puts "Joy:      #{face.likelihood.joy?}"
  puts "Anger:    #{face.likelihood.anger?}"
  puts "Sorrow:   #{face.likelihood.sorrow?}"
  puts "Surprise: #{face.likelihood.surprise?}"
end

Detecting faces in a remote image

The Vision API can perform face detection directly on an image file located in Google Cloud Storage or on the web, without the need to send the contents of the image file in the body of the request.

Protocol

For details, see the images:annotate API endpoint.

To perform face detection, make a POST request and provide the appropriate request body:

POST https://vision.googleapis.com/v1/images:annotate?key=YOUR_API_KEY
{
  "requests": [
    {
      "image": {
        "source": {
          "gcsImageUri": "gs://YOUR_BUCKET_NAME/YOUR_FILE_NAME"
        }
      },
      "features": [
        {
          "type": "FACE_DETECTION"
        }
      ]
    }
  ]
}

See the AnnotateImageRequest reference documentation for details on configuring the request body.
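
The same sketch as in the local-image section, adjusted for a remote image: only the image field of the request body changes. The API key, bucket, and file name are placeholders, and the requests library is again a stated assumption.

import json

import requests  # third-party HTTP library, assumed to be installed

# Placeholders: substitute your own API key and Cloud Storage object.
API_KEY = 'YOUR_API_KEY'
GCS_URI = 'gs://YOUR_BUCKET_NAME/YOUR_FILE_NAME'

# The inline image content is replaced by a source URI; nothing is uploaded.
body = {
    'requests': [{
        'image': {'source': {'gcsImageUri': GCS_URI}},
        'features': [{'type': 'FACE_DETECTION'}],
    }]
}

response = requests.post(
    'https://vision.googleapis.com/v1/images:annotate',
    params={'key': API_KEY},
    json=body)
print(json.dumps(response.json(), indent=2))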

C#

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

// Specify a Google Cloud Storage uri for the image
// or a publicly accessible HTTP or HTTPS uri.
var image = Image.FromUri(uri);
var client = ImageAnnotatorClient.Create();
var response = client.DetectFaces(image);
int count = 1;
foreach (var faceAnnotation in response)
{
    Console.WriteLine("Face {0}:", count++);
    Console.WriteLine("  Joy: {0}", faceAnnotation.JoyLikelihood);
    Console.WriteLine("  Anger: {0}", faceAnnotation.AngerLikelihood);
    Console.WriteLine("  Sorrow: {0}", faceAnnotation.SorrowLikelihood);
    Console.WriteLine("  Surprise: {0}", faceAnnotation.SurpriseLikelihood);
}

Go

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

// detectFacesURI gets faces from the Vision API for an image at the given URI.
func detectFacesURI(w io.Writer, file string) error {
	ctx := context.Background()

	client, err := vision.NewImageAnnotatorClient(ctx)
	if err != nil {
		return err
	}

	image := vision.NewImageFromURI(file)
	annotations, err := client.DetectFaces(ctx, image, nil, 10)
	if err != nil {
		return err
	}
	if len(annotations) == 0 {
		fmt.Fprintln(w, "No faces found.")
	} else {
		fmt.Fprintln(w, "Faces:")
		for i, annotation := range annotations {
			fmt.Fprintln(w, "  Face", i)
			fmt.Fprintln(w, "    Anger:", annotation.AngerLikelihood)
			fmt.Fprintln(w, "    Joy:", annotation.JoyLikelihood)
			fmt.Fprintln(w, "    Surprise:", annotation.SurpriseLikelihood)
		}
	}
	return nil
}

Java

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

public static void detectFacesGcs(String gcsPath, PrintStream out) throws Exception,
    IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feat = Feature.newBuilder().setType(Type.FACE_DETECTION).build();

  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      for (FaceAnnotation annotation : res.getFaceAnnotationsList()) {
        out.printf(
            "anger: %s\njoy: %s\nsurprise: %s\nposition: %s",
            annotation.getAngerLikelihood(),
            annotation.getJoyLikelihood(),
            annotation.getSurpriseLikelihood(),
            annotation.getBoundingPoly());
      }
    }
  }
}

Node.js

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

// Imports the Google Cloud client libraries
const vision = require('@google-cloud/vision');

// Creates a client
const client = new vision.ImageAnnotatorClient();

/**
 * TODO(developer): Uncomment the following lines before running the sample.
 */
// const bucketName = 'Bucket where the file resides, e.g. my-bucket';
// const fileName = 'Path to file within bucket, e.g. path/to/image.png';

// Performs face detection on the gcs file
client
  .faceDetection(`gs://${bucketName}/${fileName}`)
  .then(results => {
    const faces = results[0].faceAnnotations;

    console.log('Faces:');
    faces.forEach((face, i) => {
      console.log(`  Face #${i + 1}:`);
      console.log(`    Joy: ${face.joyLikelihood}`);
      console.log(`    Anger: ${face.angerLikelihood}`);
      console.log(`    Sorrow: ${face.sorrowLikelihood}`);
      console.log(`    Surprise: ${face.surpriseLikelihood}`);
    });
  })
  .catch(err => {
    console.error('ERROR:', err);
  });

PHP

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

namespace Google\Cloud\Samples\Vision;

use Google\Cloud\Vision\V1\ImageAnnotatorClient;

// $path = 'gs://path/to/your/image.jpg'

function detect_face_gcs($path)
{
    $imageAnnotator = new ImageAnnotatorClient();

    # annotate the image
    $response = $imageAnnotator->faceDetection($path);
    $faces = $response->getFaceAnnotations();

    # names of likelihood from google.cloud.vision.enums
    $likelihoodName = ['UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY',
        'POSSIBLE', 'LIKELY', 'VERY_LIKELY'];

    printf("%d faces found:" . PHP_EOL, count($faces));
    foreach ($faces as $face) {
        $anger = $face->getAngerLikelihood();
        printf("Anger: %s" . PHP_EOL, $likelihoodName[$anger]);

        $joy = $face->getJoyLikelihood();
        printf("Joy: %s" . PHP_EOL, $likelihoodName[$joy]);

        $surprise = $face->getSurpriseLikelihood();
        printf("Surprise: %s" . PHP_EOL, $likelihoodName[$surprise]);

        # get bounds
        $vertices = $face->getBoundingPoly()->getVertices();
        $bounds = [];
        foreach ($vertices as $vertex) {
            $bounds[] = sprintf('(%d,%d)', $vertex->getX(), $vertex->getY());
        }
        print('Bounds: ' . join(', ', $bounds) . PHP_EOL);
        print(PHP_EOL);
    }
}

Python

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

from google.cloud import vision


def detect_faces_uri(uri):
    """Detects faces in the file located in Google Cloud Storage or the web."""
    client = vision.ImageAnnotatorClient()
    image = vision.types.Image()
    image.source.image_uri = uri

    response = client.face_detection(image=image)
    faces = response.face_annotations

    # Names of likelihood from google.cloud.vision.enums
    likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
                       'LIKELY', 'VERY_LIKELY')
    print('Faces:')

    for face in faces:
        print('anger: {}'.format(likelihood_name[face.anger_likelihood]))
        print('joy: {}'.format(likelihood_name[face.joy_likelihood]))
        print('surprise: {}'.format(likelihood_name[face.surprise_likelihood]))

        vertices = (['({},{})'.format(vertex.x, vertex.y)
                    for vertex in face.bounding_poly.vertices])

        print('face bounds: {}'.format(','.join(vertices)))
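
As with the local variant, a hypothetical invocation (the gs:// URI is a placeholder):

# Hypothetical usage; substitute a real Cloud Storage URI or public URL.
detect_faces_uri('gs://my-bucket/image.png')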

Ruby

For more on installing and creating a Vision API client, refer to the Vision API client libraries.

# project_id = "Your Google Cloud project ID"
# image_path = "Google Cloud Storage URI, e.g. 'gs://my-bucket/image.png'"

require "google/cloud/vision"

vision = Google::Cloud::Vision.new project: project_id
image  = vision.image image_path

image.faces.each do |face|
  puts "Joy:      #{face.likelihood.joy?}"
  puts "Anger:    #{face.likelihood.anger?}"
  puts "Sorrow:   #{face.likelihood.sorrow?}"
  puts "Surprise: #{face.likelihood.surprise?}"
end

FACE_DETECTION response

A successful FACE_DETECTION request returns a response containing a set of faceAnnotations of type FaceAnnotation.

The following is an example face detection response for a sample photograph.

{
  "responses": [
    {
      "faceAnnotations": [
        {
          "boundingPoly": {
            "vertices": [
              {
                "x": 932,
                "y": 313
              },
              {
                "x": 1028,
                "y": 313
              },
              {
                "x": 1028,
                "y": 425
              },
              {
                "x": 932,
                "y": 425
              }
            ]
          },
          "fdBoundingPoly": {
            "vertices": [
              {
                "x": 936,
                "y": 333
              },
              {
                "x": 1017,
                "y": 333
              },
              {
                "x": 1017,
                "y": 413
              },
              {
                "x": 936,
                "y": 413
              }
            ]
          },
          "landmarks": [
            {
              "type": "LEFT_EYE",
              "position": {
                "x": 959.60065,
                "y": 355.98782,
                "z": -0.00016746686
              }
            },
            {
              "type": "RIGHT_EYE",
              "position": {
                "x": 984.92914,
                "y": 362.48074,
                "z": -14.466843
              }
            },
            {
              "type": "LEFT_OF_LEFT_EYEBROW",
              "position": {
                "x": 954.3997,
                "y": 348.13577,
                "z": 7.6285343
              }
            },
            {
              "type": "RIGHT_OF_LEFT_EYEBROW",
              "position": {
                "x": 965.15735,
                "y": 349.91434,
                "z": -7.9691405
              }
            },
            {
              "type": "LEFT_OF_RIGHT_EYEBROW",
              "position": {
                "x": 976.60974,
                "y": 352.59775,
                "z": -14.814832
              }
            },
            {
              "type": "RIGHT_OF_RIGHT_EYEBROW",
              "position": {
                "x": 995.2661,
                "y": 359.14246,
                "z": -15.962653
              }
            },
            {
              "type": "MIDPOINT_BETWEEN_EYES",
              "position": {
                "x": 968.7824,
                "y": 356.87964,
                "z": -12.243763
              }
            },
            {
              "type": "NOSE_TIP",
              "position": {
                "x": 959.8401,
                "y": 371.1136,
                "z": -21.012028
              }
            },
            {
              "type": "UPPER_LIP",
              "position": {
                "x": 960.88947,
                "y": 382.35114,
                "z": -15.794773
              }
            },
            {
              "type": "LOWER_LIP",
              "position": {
                "x": 959.00604,
                "y": 392.5613,
                "z": -14.722658
              }
            },
            {
              "type": "MOUTH_LEFT",
              "position": {
                "x": 953.558,
                "y": 385.6143,
                "z": -3.7360961
              }
            },
            {
              "type": "MOUTH_RIGHT",
              "position": {
                "x": 975.03265,
                "y": 390.148,
                "z": -14.706936
              }
            },
            {
              "type": "MOUTH_CENTER",
              "position": {
                "x": 960.53326,
                "y": 387.2606,
                "z": -14.258573
              }
            },
            {
              "type": "NOSE_BOTTOM_RIGHT",
              "position": {
                "x": 971.79193,
                "y": 376.1076,
                "z": -15.152608
              }
            },
            {
              "type": "NOSE_BOTTOM_LEFT",
              "position": {
                "x": 957.7185,
                "y": 373.1746,
                "z": -7.1614866
              }
            },
            {
              "type": "NOSE_BOTTOM_CENTER",
              "position": {
                "x": 962.575,
                "y": 376.29388,
                "z": -15.418351
              }
            },
            {
              "type": "LEFT_EYE_TOP_BOUNDARY",
              "position": {
                "x": 959.1816,
                "y": 353.80328,
                "z": -1.5174211
              }
            },
            {
              "type": "LEFT_EYE_RIGHT_CORNER",
              "position": {
                "x": 964.36786,
                "y": 357.51196,
                "z": -2.7060971
              }
            },
            {
              "type": "LEFT_EYE_BOTTOM_BOUNDARY",
              "position": {
                "x": 958.7769,
                "y": 358.01065,
                "z": -0.3359541
              }
            },
            {
              "type": "LEFT_EYE_LEFT_CORNER",
              "position": {
                "x": 955.79767,
                "y": 355.51834,
                "z": 5.151253
              }
            },
            {
              "type": "LEFT_EYE_PUPIL",
              "position": {
                "x": 958.7773,
                "y": 355.84012,
                "z": -0.38514262
              }
            },
            {
              "type": "RIGHT_EYE_TOP_BOUNDARY",
              "position": {
                "x": 983.61804,
                "y": 359.85156,
                "z": -15.601014
              }
            },
            {
              "type": "RIGHT_EYE_RIGHT_CORNER",
              "position": {
                "x": 990.0031,
                "y": 364.02197,
                "z": -14.567666
              }
            },
            {
              "type": "RIGHT_EYE_BOTTOM_BOUNDARY",
              "position": {
                "x": 983.9871,
                "y": 364.64563,
                "z": -14.510015
              }
            },
            {
              "type": "RIGHT_EYE_LEFT_CORNER",
              "position": {
                "x": 978.7498,
                "y": 361.2154,
                "z": -11.121486
              }
            },
            {
              "type": "RIGHT_EYE_PUPIL",
              "position": {
                "x": 983.81213,
                "y": 362.04236,
                "z": -14.877491
              }
            },
            {
              "type": "LEFT_EYEBROW_UPPER_MIDPOINT",
              "position": {
                "x": 959.80444,
                "y": 345.24878,
                "z": -1.5490607
              }
            },
            {
              "type": "RIGHT_EYEBROW_UPPER_MIDPOINT",
              "position": {
                "x": 986.2949,
                "y": 351.80408,
                "z": -16.823978
              }
            },
            {
              "type": "LEFT_EAR_TRAGION",
              "position": {
                "x": 956.0783,
                "y": 372.93738,
                "z": 39.021652
              }
            },
            {
              "type": "RIGHT_EAR_TRAGION",
              "position": {
                "x": 1012.669,
                "y": 387.13126,
                "z": 7.191323
              }
            },
            {
              "type": "FOREHEAD_GLABELLA",
              "position": {
                "x": 970.6526,
                "y": 350.78348,
                "z": -12.321499
              }
            },
            {
              "type": "CHIN_GNATHION",
              "position": {
                "x": 956.40735,
                "y": 406.87085,
                "z": -12.346105
              }
            },
            {
              "type": "CHIN_LEFT_GONION",
              "position": {
                "x": 948.2937,
                "y": 388.85358,
                "z": 25.902096
              }
            },
            {
              "type": "CHIN_RIGHT_GONION",
              "position": {
                "x": 998.49835,
                "y": 401.61972,
                "z": -3.1576655
              }
            }
          ],
          "rollAngle": 16.379295,
          "panAngle": -29.333826,
          "tiltAngle": 4.458676,
          "detectionConfidence": 0.980691,
          "landmarkingConfidence": 0.57905465,
          "joyLikelihood": "VERY_LIKELY",
          "sorrowLikelihood": "VERY_UNLIKELY",
          "angerLikelihood": "VERY_UNLIKELY",
          "surpriseLikelihood": "VERY_UNLIKELY",
          "underExposedLikelihood": "VERY_UNLIKELY",
          "blurredLikelihood": "VERY_UNLIKELY",
          "headwearLikelihood": "VERY_UNLIKELY"
        }
      ]
    }
  ]
}
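
As a sketch of navigating this structure in Python, the snippet below walks a decoded response. Here response_json is an assumed variable holding the parsed JSON shown above (for example, the output of json.loads()).

# Minimal sketch: response_json is assumed to hold the parsed response above.
for face in response_json['responses'][0].get('faceAnnotations', []):
    # Head pose, in degrees.
    print('roll: {} pan: {} tilt: {}'.format(
        face['rollAngle'], face['panAngle'], face['tiltAngle']))

    # Emotion and headwear attributes are likelihood enums, not scores.
    print('joy: {} headwear: {}'.format(
        face['joyLikelihood'], face['headwearLikelihood']))

    # Each landmark has a type and a 3D position; z is depth relative
    # to the plane of the face. Zero-valued fields may be omitted in JSON,
    # hence the .get() fallback.
    for landmark in face['landmarks']:
        pos = landmark['position']
        print('{}: ({}, {}, {})'.format(
            landmark['type'], pos.get('x', 0.0), pos.get('y', 0.0),
            pos.get('z', 0.0)))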