Using the Cloud Vision API to determine whether an image is safe

This tutorial shows how to use Cloud Run, the Cloud Vision API, and ImageMagick to detect and blur offensive images uploaded to a Cloud Storage bucket.

Explore further

For detailed documentation that includes this code sample, see the following:

Code sample

Go

To authenticate to Cloud Run, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
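
The Go sample below assumes a package-level visionClient. As a minimal sketch (not part of the original sample), the client library picks up Application Default Credentials automatically when the client is constructed; the init placement here is illustrative:

func init() {
	// NewImageAnnotatorClient discovers credentials via Application
	// Default Credentials; no explicit key handling is needed.
	var err error
	visionClient, err = vision.NewImageAnnotatorClient(context.Background())
	if err != nil {
		log.Fatalf("vision.NewImageAnnotatorClient: %v", err)
	}
}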


import (
	"context"
	"errors"
	"fmt"
	"log"
	"os"

	vision "cloud.google.com/go/vision/v2/apiv1"
	"cloud.google.com/go/vision/v2/apiv1/visionpb"
)

// visionClient is a package-level Vision API client, created once at startup
// (see the initialization sketch above).
var visionClient *vision.ImageAnnotatorClient

// GCSEvent is the payload of a GCS event.
type GCSEvent struct {
	Bucket string `json:"bucket"`
	Name   string `json:"name"`
}

// BlurOffensiveImages blurs offensive images uploaded to GCS.
func BlurOffensiveImages(ctx context.Context, e GCSEvent) error {
	outputBucket := os.Getenv("BLURRED_BUCKET_NAME")
	if outputBucket == "" {
		return errors.New("BLURRED_BUCKET_NAME must be set")
	}

	img := vision.NewImageFromURI(fmt.Sprintf("gs://%s/%s", e.Bucket, e.Name))

	resp, err := visionClient.DetectSafeSearch(ctx, img, nil)
	if err != nil {
		return fmt.Errorf("DetectSafeSearch: %w", err)
	}

	if resp.GetAdult() == visionpb.Likelihood_VERY_LIKELY ||
		resp.GetViolence() == visionpb.Likelihood_VERY_LIKELY {
		// blur re-uploads a blurred copy of the image; a sketch follows the sample.
		return blur(ctx, e.Bucket, outputBucket, e.Name)
	}
	log.Printf("The image %q was detected as OK.", e.Name)
	return nil
}
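
The blur helper called above is defined elsewhere in the tutorial and is not shown on this page. The following is a rough sketch of such a helper, assuming ImageMagick's convert binary is available in the container image and the additional imports io, os/exec, path/filepath, and cloud.google.com/go/storage; the function body, temporary paths, and the -blur 0x8 radius are illustrative, not the tutorial's exact code:

func blur(ctx context.Context, inputBucket, outputBucket, name string) error {
	storageClient, err := storage.NewClient(ctx)
	if err != nil {
		return fmt.Errorf("storage.NewClient: %w", err)
	}
	defer storageClient.Close()

	// Download the flagged image to a temporary file.
	r, err := storageClient.Bucket(inputBucket).Object(name).NewReader(ctx)
	if err != nil {
		return fmt.Errorf("NewReader: %w", err)
	}
	defer r.Close()

	inPath := filepath.Join(os.TempDir(), filepath.Base(name))
	outPath := inPath + "-blurred"
	in, err := os.Create(inPath)
	if err != nil {
		return err
	}
	if _, err := io.Copy(in, r); err != nil {
		in.Close()
		return err
	}
	if err := in.Close(); err != nil {
		return err
	}

	// Blur the local copy with ImageMagick.
	if err := exec.CommandContext(ctx, "convert", inPath, "-blur", "0x8", outPath).Run(); err != nil {
		return fmt.Errorf("convert: %w", err)
	}

	// Upload the blurred copy to the output bucket under the same object name.
	out, err := os.Open(outPath)
	if err != nil {
		return err
	}
	defer out.Close()
	w := storageClient.Bucket(outputBucket).Object(name).NewWriter(ctx)
	if _, err := io.Copy(w, out); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}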

Java

To authenticate to Cloud Run, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

import com.google.cloud.storage.BlobInfo;
import com.google.cloud.vision.v1.AnnotateImageRequest;
import com.google.cloud.vision.v1.AnnotateImageResponse;
import com.google.cloud.vision.v1.BatchAnnotateImagesResponse;
import com.google.cloud.vision.v1.Feature;
import com.google.cloud.vision.v1.Feature.Type;
import com.google.cloud.vision.v1.Image;
import com.google.cloud.vision.v1.ImageAnnotatorClient;
import com.google.cloud.vision.v1.ImageSource;
import com.google.cloud.vision.v1.SafeSearchAnnotation;
import com.google.gson.JsonObject;
import java.util.ArrayList;
import java.util.List;

// Blurs uploaded images that are flagged as Adult or Violence.
public static void blurOffensiveImages(JsonObject data) {
  String fileName = data.get("name").getAsString();
  String bucketName = data.get("bucket").getAsString();
  BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, fileName).build();
  // Construct URI to GCS bucket and file.
  String gcsPath = String.format("gs://%s/%s", bucketName, fileName);
  System.out.println(String.format("Analyzing %s", fileName));

  // Construct request.
  List<AnnotateImageRequest> requests = new ArrayList<>();
  ImageSource imgSource = ImageSource.newBuilder().setImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feature = Feature.newBuilder().setType(Type.SAFE_SEARCH_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feature).setImage(img).build();
  requests.add(request);

  // Send request to the Vision API.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();
    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        System.out.println(String.format("Error: %s\n", res.getError().getMessage()));
        return;
      }
      // Get Safe Search Annotations
      SafeSearchAnnotation annotation = res.getSafeSearchAnnotation();
      // A Likelihood value of 5 corresponds to VERY_LIKELY.
      if (annotation.getAdultValue() == 5 || annotation.getViolenceValue() == 5) {
        System.out.println(String.format("Detected %s as inappropriate.", fileName));
        // blur re-uploads a blurred copy of the image (helper not shown here).
        blur(blobInfo);
      } else {
        System.out.println(String.format("Detected %s as OK.", fileName));
      }
    }
  } catch (Exception e) {
    System.out.println(String.format("Error with Vision API: %s", e.getMessage()));
  }
}

Node.js

To authenticate to Cloud Run, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

// Client libraries and configuration, initialized once per instance.
const vision = require('@google-cloud/vision');
const {Storage} = require('@google-cloud/storage');

const client = new vision.ImageAnnotatorClient();
const storage = new Storage();
const {BLURRED_BUCKET_NAME} = process.env;

// Blurs uploaded images that are flagged as Adult or Violence.
exports.blurOffensiveImages = async event => {
  // This event represents the triggering Cloud Storage object.
  const object = event;

  const file = storage.bucket(object.bucket).file(object.name);
  const filePath = `gs://${object.bucket}/${object.name}`;

  console.log(`Analyzing ${file.name}.`);

  try {
    const [result] = await client.safeSearchDetection(filePath);
    const detections = result.safeSearchAnnotation || {};

    if (
      // Levels are defined in https://cloud.google.com/vision/docs/reference/rest/v1/AnnotateImageResponse#likelihood
      detections.adult === 'VERY_LIKELY' ||
      detections.violence === 'VERY_LIKELY'
    ) {
      console.log(`Detected ${file.name} as inappropriate.`);
      // blurImage re-uploads a blurred copy of the image (helper not shown here).
      return blurImage(file, BLURRED_BUCKET_NAME);
    } else {
      console.log(`Detected ${file.name} as OK.`);
    }
  } catch (err) {
    console.error(`Failed to analyze ${file.name}.`, err);
    throw err;
  }
};

Python

To authenticate to Cloud Run, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

from google.cloud import storage, vision

# Clients are created once per process and reused across invocations.
storage_client = storage.Client()
vision_client = vision.ImageAnnotatorClient()


def blur_offensive_images(data):
    """Blurs uploaded images that are flagged as Adult or Violence.

    Args:
        data: Pub/Sub message data
    """
    file_data = data

    file_name = file_data["name"]
    bucket_name = file_data["bucket"]

    blob = storage_client.bucket(bucket_name).get_blob(file_name)
    blob_uri = f"gs://{bucket_name}/{file_name}"
    blob_source = vision.Image(source=vision.ImageSource(image_uri=blob_uri))

    # Ignore already-blurred files
    if file_name.startswith("blurred-"):
        print(f"The image {file_name} is already blurred.")
        return

    print(f"Analyzing {file_name}.")

    result = vision_client.safe_search_detection(image=blob_source)
    detected = result.safe_search_annotation

    # Process the image; a Likelihood value of 5 corresponds to VERY_LIKELY.
    if detected.adult == 5 or detected.violence == 5:
        print(f"The image {file_name} was detected as inappropriate.")
        # __blur_image re-uploads a blurred copy of the image (helper not shown here).
        return __blur_image(blob)
    else:
        print(f"The image {file_name} was detected as OK.")

What's next

To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.