Extend image content using mask-based outpainting with Imagen

This sample demonstrates how to use the Imagen model for mask-based image editing. You specify a targeted mask area to expand the base image content to fit a larger or differently sized canvas.
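
The samples below assume you already have a base image and a mask image prepared at the target (expanded) canvas size. As a rough sketch only, not part of the official samples, you could prepare both files with Pillow; this assumes the mask should be white where new content is generated and black over the area occupied by the original image:

# Hypothetical helper (not part of the official sample): pads a base image onto
# a larger canvas and builds a matching mask for outpainting. Assumes the mask
# convention is white (255) = area to generate, black (0) = area to preserve.
from PIL import Image

def prepare_outpainting_inputs(
    input_path: str,
    target_width: int,
    target_height: int,
    padded_image_path: str = "padded-input.png",
    mask_path: str = "mask-image.png",
) -> None:
    base = Image.open(input_path).convert("RGB")

    # Center the original image on the larger target canvas.
    canvas = Image.new("RGB", (target_width, target_height), color=(0, 0, 0))
    offset = ((target_width - base.width) // 2, (target_height - base.height) // 2)
    canvas.paste(base, offset)
    canvas.save(padded_image_path)

    # White everywhere new content should be generated, black over the original.
    mask = Image.new("L", (target_width, target_height), color=255)
    mask.paste(Image.new("L", (base.width, base.height), color=0), offset)
    mask.save(mask_path)

# Example usage with hypothetical paths and sizes:
# prepare_outpainting_inputs("my-input.png", 2048, 1536)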

Code samples

Java

Before trying this sample, follow the Java setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Java API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.


import com.google.api.gax.rpc.ApiException;
import com.google.cloud.aiplatform.v1.EndpointName;
import com.google.cloud.aiplatform.v1.PredictResponse;
import com.google.cloud.aiplatform.v1.PredictionServiceClient;
import com.google.cloud.aiplatform.v1.PredictionServiceSettings;
import com.google.gson.Gson;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Value;
import com.google.protobuf.util.JsonFormat;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Base64;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class EditImageOutpaintingMaskSample {

  public static void main(String[] args) throws IOException {
    // TODO(developer): Replace these variables before running the sample.
    String projectId = "my-project-id";
    String location = "us-central1";
    String inputPath = "/path/to/my-input.png";
    String maskPath = "/path/to/my-mask.png";
    String prompt = ""; // The optional text prompt describing what you want to see inserted.

    editImageOutpaintingMask(projectId, location, inputPath, maskPath, prompt);
  }

  // Edit an image using a mask file. Outpainting lets you expand the content of a base image to fit
  // a larger or differently sized mask canvas.
  public static PredictResponse editImageOutpaintingMask(
      String projectId, String location, String inputPath, String maskPath, String prompt)
      throws ApiException, IOException {
    final String endpoint = String.format("%s-aiplatform.googleapis.com:443", location);
    PredictionServiceSettings predictionServiceSettings =
        PredictionServiceSettings.newBuilder().setEndpoint(endpoint).build();

    // Initialize client that will be used to send requests. This client only needs to be created
    // once, and can be reused for multiple requests.
    try (PredictionServiceClient predictionServiceClient =
        PredictionServiceClient.create(predictionServiceSettings)) {

      final EndpointName endpointName =
          EndpointName.ofProjectLocationPublisherModelName(
              projectId, location, "google", "imagegeneration@006");

      // Encode image and mask to Base64
      String imageBase64 =
          Base64.getEncoder().encodeToString(Files.readAllBytes(Paths.get(inputPath)));
      String maskBase64 =
          Base64.getEncoder().encodeToString(Files.readAllBytes(Paths.get(maskPath)));

      // Create the image and image mask maps
      Map<String, String> imageMap = new HashMap<>();
      imageMap.put("bytesBase64Encoded", imageBase64);

      Map<String, String> maskMap = new HashMap<>();
      maskMap.put("bytesBase64Encoded", maskBase64);
      Map<String, Map<String, String>> imageMaskMap = new HashMap<>();
      imageMaskMap.put("image", maskMap);

      Map<String, Object> instancesMap = new HashMap<>();
      instancesMap.put("prompt", prompt); // [ "prompt", "<my-prompt>" ]
      instancesMap.put(
          "image", imageMap); // [ "image", [ "bytesBase64Encoded", "iVBORw0KGgo...==" ] ]
      instancesMap.put(
          "mask",
          imageMaskMap); // [ "mask", [ "image", [ "bytesBase64Encoded", "iJKDF0KGpl...==" ] ] ]
      instancesMap.put("editMode", "outpainting"); // [ "editMode", "outpainting" ]
      Value instances = mapToValue(instancesMap);

      // Optional parameters
      Map<String, Object> paramsMap = new HashMap<>();
      paramsMap.put("sampleCount", 1);
      Value parameters = mapToValue(paramsMap);

      PredictResponse predictResponse =
          predictionServiceClient.predict(
              endpointName, Collections.singletonList(instances), parameters);

      for (Value prediction : predictResponse.getPredictionsList()) {
        Map<String, Value> fieldsMap = prediction.getStructValue().getFieldsMap();
        if (fieldsMap.containsKey("bytesBase64Encoded")) {
          String bytesBase64Encoded = fieldsMap.get("bytesBase64Encoded").getStringValue();
          Path tmpPath = Files.createTempFile("imagen-", ".png");
          Files.write(tmpPath, Base64.getDecoder().decode(bytesBase64Encoded));
          System.out.format("Image file written to: %s\n", tmpPath.toUri());
        }
      }
      return predictResponse;
    }
  }

  private static Value mapToValue(Map<String, Object> map) throws InvalidProtocolBufferException {
    Gson gson = new Gson();
    String json = gson.toJson(map);
    Value.Builder builder = Value.newBuilder();
    JsonFormat.parser().merge(json, builder);
    return builder.build();
  }
}

Node.js

Before trying this sample, follow the Node.js setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Node.js API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

/**
 * TODO(developer): Update these variables before running the sample.
 */
const projectId = process.env.CAIP_PROJECT_ID;
const location = 'us-central1';
const inputFile = 'resources/roller_skaters.png';
const maskFile = 'resources/roller_skaters_mask.png';
const prompt = 'city with skyscrapers';

const aiplatform = require('@google-cloud/aiplatform');

// Imports the Google Cloud Prediction Service Client library
const {PredictionServiceClient} = aiplatform.v1;

// Import the helper module for converting arbitrary protobuf.Value objects
const {helpers} = aiplatform;

// Specifies the location of the api endpoint
const clientOptions = {
  apiEndpoint: `${location}-aiplatform.googleapis.com`,
};

// Instantiates a client
const predictionServiceClient = new PredictionServiceClient(clientOptions);

async function editImageOutpaintingMask() {
  const fs = require('fs');
  const util = require('util');
  // Configure the parent resource
  const endpoint = `projects/${projectId}/locations/${location}/publishers/google/models/imagegeneration@006`;

  const imageFile = fs.readFileSync(inputFile);
  // Convert the image data to a Buffer and base64 encode it.
  const encodedImage = Buffer.from(imageFile).toString('base64');

  const maskImageFile = fs.readFileSync(maskFile);
  // Convert the image mask data to a Buffer and base64 encode it.
  const encodedMask = Buffer.from(maskImageFile).toString('base64');

  const promptObj = {
    prompt: prompt, // The optional text prompt describing what you want to see inserted
    editMode: 'outpainting',
    image: {
      bytesBase64Encoded: encodedImage,
    },
    mask: {
      image: {
        bytesBase64Encoded: encodedMask,
      },
    },
  };
  const instanceValue = helpers.toValue(promptObj);
  const instances = [instanceValue];

  const parameter = {
    // Optional parameters
    seed: 100,
    // Controls the strength of the prompt
    // 0-9 (low strength), 10-20 (medium strength), 21+ (high strength)
    guidanceScale: 21,
    sampleCount: 1,
  };
  const parameters = helpers.toValue(parameter);

  const request = {
    endpoint,
    instances,
    parameters,
  };

  // Predict request
  const [response] = await predictionServiceClient.predict(request);
  const predictions = response.predictions;
  if (predictions.length === 0) {
    console.log(
      'No image was generated. Check the request parameters and prompt.'
    );
  } else {
    let i = 1;
    for (const prediction of predictions) {
      const buff = Buffer.from(
        prediction.structValue.fields.bytesBase64Encoded.stringValue,
        'base64'
      );
      // Write image content to the output file
      const writeFile = util.promisify(fs.writeFile);
      const filename = `output${i}.png`;
      await writeFile(filename, buff);
      console.log(`Saved image ${filename}`);
      i++;
    }
  }
}
// Note: top-level await requires an ES module; in a CommonJS script,
// wrap this call in an async function or handle the returned promise.
await editImageOutpaintingMask();

Python

Before trying this sample, follow the Python setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Python API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.


import vertexai
from vertexai.preview.vision_models import Image, ImageGenerationModel

# TODO(developer): Update and un-comment below lines
# PROJECT_ID = "your-project-id"
# input_file = "input-image.png"
# mask_file = "mask-image.png"
# output_file = "output-image.png"
# prompt = "" # The optional text prompt describing what you want to see inserted.

vertexai.init(project=PROJECT_ID, location="us-central1")

model = ImageGenerationModel.from_pretrained("imagegeneration@006")
base_img = Image.load_from_file(location=input_file)
mask_img = Image.load_from_file(location=mask_file)

images = model.edit_image(
    base_image=base_img,
    mask=mask_img,
    prompt=prompt,
    edit_mode="outpainting",
)

images[0].save(location=output_file, include_generation_parameters=False)

# Optional. View the edited image in a notebook.
# images[0].show()

print(f"Created output image using {len(images[0]._image_bytes)} bytes")
# Example response:
# Created output image using 1234567 bytes

What's next

To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.