Cloud Storage system tests

Demonstrates how to system test a function triggered by Cloud Storage.

Documentation pages that include this code sample

To view the code sample used in context, see the following documentation:

Code samples


#include <google/cloud/storage/client.h>
#include <boost/process.hpp>
#include <fmt/format.h>
#include <gmock/gmock.h>
#include <algorithm>
#include <chrono>
#include <random>
#include <string>
#include <thread>
#include <vector>

namespace {

namespace bp = ::boost::process;
namespace gcs = ::google::cloud::storage;
using ::testing::HasSubstr;

class StorageSystemTest : public ::testing::Test {
  void SetUp() override {
    // This test can only run if it is properly configured.
    auto const* project_id = std::getenv("GOOGLE_CLOUD_PROJECT");
    ASSERT_NE(project_id, nullptr);
    project_id_ = project_id;

    auto const* bucket_name = std::getenv("BUCKET_NAME");
    ASSERT_NE(bucket_name, nullptr);
    bucket_name_ = bucket_name;

    auto const* service_id = std::getenv("SERVICE_ID");
    ASSERT_NE(service_id, nullptr);
    service_id_ = service_id;

  void TearDown() override {}

  [[nodiscard]] std::string const& project_id() const { return project_id_; }
  [[nodiscard]] std::string const& bucket_name() const { return bucket_name_; }
  [[nodiscard]] std::string const& service_id() const { return service_id_; }

  std::string project_id_;
  std::string bucket_name_;
  std::string service_id_;

// Uploads a randomly named object, then polls the Cloud Run service's logs
// (via `gcloud logging read`) until the expected log line appears.
TEST_F(StorageSystemTest, Basic) {
  // CreateDefaultClient() returns a StatusOr<>; fail fast if it did not work.
  auto client = gcs::Client::CreateDefaultClient();
  ASSERT_TRUE(client.ok()) << "client status=" << client.status();

  // Use a random name to avoid interference from other tests.
  auto gen = std::mt19937_64(std::random_device{}());
  auto rnd = [&gen] {
    return std::to_string(std::uniform_int_distribution<std::uint64_t>{}(gen));
  };
  auto const object_name = "test-" + rnd() + '-' + rnd() + '-' + rnd();
  auto const expected = "Object: " + object_name;
  SCOPED_TRACE("Testing for " + object_name);
  auto meta =
      client->InsertObject(bucket_name(), object_name, "Lorem ipsum...");
  ASSERT_TRUE(meta.ok()) << "insert status=" << meta.status();

  std::vector<std::string> lines;
  // It may take a few seconds for the logs to propagate, so try a few times
  // with exponential backoff.
  using namespace std::chrono_literals;
  for (auto delay : {1s, 2s, 4s, 8s, 16s}) {
    bp::ipstream out;
    auto constexpr kProject = "--project={}";
    // NOTE(review): the last clause of this filter was lost in extraction;
    // restricting to stdout log entries -- confirm against the deployed
    // service's log names.
    auto constexpr kLogFilter =
        "resource.type=cloud_run_revision AND "
        "resource.labels.service_name={} AND "
        "logName:stdout";
    auto reader = bp::child(bp::search_path("gcloud"), "logging", "read",
                            fmt::format(kProject, project_id()),
                            fmt::format(kLogFilter, service_id()),
                            "--format=value(textPayload)", "--limit=100",
                            (bp::std_out & bp::std_err) > out);
    // exit_code() is only meaningful once the child process has terminated.
    reader.wait();
    ASSERT_EQ(reader.exit_code(), 0);
    for (std::string l; std::getline(out, l);) lines.push_back(std::move(l));
    auto count = std::count_if(
        lines.begin(), lines.end(),
        [s = expected](auto l) { return l.find(s) != std::string::npos; });
    if (count != 0) break;
    std::this_thread::sleep_for(delay);
  }
  EXPECT_THAT(lines, ::testing::Contains(HasSubstr(expected)));
  // Clean up the test object so the bucket does not accumulate garbage.
  EXPECT_TRUE(client->DeleteObject(bucket_name(), object_name).ok());
}

}  // namespace


package helloworld

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"strings"
	"testing"
	"time"

	"cloud.google.com/go/storage"
)

// TestHelloGCSSystem exercises the deployed HelloGCS function: it creates,
// updates, and deletes an object in BUCKET_NAME, and after each step checks
// the function's logs for the corresponding event message.
func TestHelloGCSSystem(t *testing.T) {
	ctx := context.Background()
	bucketName := os.Getenv("BUCKET_NAME")

	client, err := storage.NewClient(ctx)
	if err != nil {
		t.Fatalf("storage.NewClient: %v", err)
	}

	// Create a file.
	startTime := time.Now().UTC().Format(time.RFC3339)
	oh := client.Bucket(bucketName).Object("TestHelloGCSSystem")
	w := oh.NewWriter(ctx)
	fmt.Fprintf(w, "Content of the file")
	// The object is not committed to Cloud Storage until the writer is closed.
	if err := w.Close(); err != nil {
		t.Fatalf("Writer.Close: %v", err)
	}

	// Wait for logs to be consistent.
	time.Sleep(20 * time.Second)

	// Check logs.
	want := "created"
	if got := readLogs(t, startTime); !strings.Contains(got, want) {
		t.Errorf("HelloGCS logged %q, want to contain %q", got, want)
	}

	// Modify the file.
	startTime = time.Now().UTC().Format(time.RFC3339)
	_, err = oh.Update(ctx, storage.ObjectAttrsToUpdate{
		Metadata: map[string]string{"Content-Type": "text/html"},
	})
	if err != nil {
		t.Errorf("Update: %v", err)
	}

	// Wait for logs to be consistent.
	time.Sleep(20 * time.Second)

	// Check logs.
	want = "updated"
	if got := readLogs(t, startTime); !strings.Contains(got, want) {
		t.Errorf("HelloGCS logged %q, want to contain %q", got, want)
	}

	// Delete the file.
	startTime = time.Now().UTC().Format(time.RFC3339)
	if err := oh.Delete(ctx); err != nil {
		t.Errorf("Delete: %v", err)
	}

	// Wait for logs to be consistent.
	time.Sleep(20 * time.Second)

	// Check logs.
	want = "deleted"
	if got := readLogs(t, startTime); !strings.Contains(got, want) {
		t.Errorf("HelloGCS logged %q, want to contain %q", got, want)
	}
}
// readLogs returns the combined output of `gcloud alpha functions logs read`
// for the HelloGCS function, limited to entries at or after startTime.
func readLogs(t *testing.T, startTime string) string {
	t.Helper()
	cmd := exec.Command("gcloud", "alpha", "functions", "logs", "read", "HelloGCS", "--start-time", startTime)
	got, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("exec.Command: %v", err)
	}
	return string(got)
}

import static;

import io.github.resilience4j.core.IntervalFunction;
import io.github.resilience4j.retry.Retry;
import io.github.resilience4j.retry.RetryConfig;
import io.github.resilience4j.retry.RetryRegistry;
import java.time.Duration;
import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.util.UUID;
import org.junit.BeforeClass;
import org.junit.Test;

public class ExampleSystemIT {

  // TODO<developer>: set these values (as environment variables)
  private static final String PROJECT_ID = System.getenv("GCP_PROJECT");
  private static final String FUNCTIONS_BUCKET = System.getenv("FUNCTIONS_BUCKET");
  private static final String FUNCTION_DEPLOYED_NAME = "HelloGcs";
  private static final Storage STORAGE = StorageOptions.getDefaultInstance().getService();

  private static Logging loggingClient;

  private HelloGcs sampleUnderTest;

  public static void setUp() throws IOException {
    loggingClient = LoggingOptions.getDefaultInstance().getService();

  private static String getLogEntriesAsString(String startTimestamp) {
    // Construct Stackdriver logging filter
    // See this page for more info:
    String filter = "resource.type=\"cloud_function\""
        + " AND severity=INFO"
        + " AND resource.labels.function_name=" + FUNCTION_DEPLOYED_NAME
        + String.format(" AND timestamp>=\"%s\"", startTimestamp);

    // Get Stackdriver logging entries
    Page<LogEntry> logEntries =
                Logging.SortingField.TIMESTAMP, Logging.SortingOrder.DESCENDING)

    // Serialize Stackdriver logging entries + collect them into a single string
    String logsConcat =, false)
        .map((x) -> x.toString())

    return logsConcat;

  public void helloGcs_shouldRunOnGcf() {
    String filename = String.format("test-%s.txt", UUID.randomUUID());

    // Subtract time to work-around local-GCF clock difference
    Instant startInstant =;
    String startTimestamp = DateTimeFormatter.ISO_INSTANT.format(startInstant);

    // Upload a file to Cloud Storage
    BlobInfo blobInfo = BlobInfo.newBuilder(BlobId.of(FUNCTIONS_BUCKET, filename)).build();

    // Keep retrying until the logs contain the desired invocation's log entry
    // (If the invocation failed, the retry process will eventually time out)
    String expected = String.format("File: %s", filename);
    RetryRegistry registry = RetryRegistry.of(RetryConfig.custom()
        .intervalFunction(IntervalFunction.ofExponentialBackoff(1000, 2))
        .retryOnResult(s -> !s.toString().contains(expected))
    Retry retry = registry.retry(filename);
    String logEntry = Retry
        .decorateFunction(retry, ExampleSystemIT::getLogEntriesAsString)

    // Perform final assertion (to make sure we fail on timeout)


const {Storage} = require('@google-cloud/storage');
const storage = new Storage();
const uuid = require('uuid');
const assert = require('assert');
const path = require('path');
const childProcess = require('child_process');
const moment = require('moment');
const promiseRetry = require('promise-retry');

// Use unique GCS filename to avoid conflicts between concurrent test runs
const gcsFileName = `test-${uuid.v4()}.txt`;

const localFileName = 'test.txt';
const bucketName = process.env.FUNCTIONS_DELETABLE_BUCKET;
// Fail fast with a clear message if the test environment is not configured.
if (!bucketName) {
  throw new Error('"FUNCTIONS_DELETABLE_BUCKET" env var must be set.');
}
if (!process.env.GCF_REGION) {
  throw new Error('"GCF_REGION" env var must be set.');
}
const bucket = storage.bucket(bucketName);
const baseCmd = 'gcloud functions';

describe('system tests', () => {
  // Uploads a file to the trigger bucket, then polls the function's logs
  // until the expected "File: <name>" entry appears.
  it('helloGCS: should print event', async () => {
    // Subtract time to work-around local-GCF clock difference
    const startTime = moment().subtract(2, 'minutes').toISOString();

    // Upload file
    const filepath = path.join(__dirname, localFileName);
    await bucket.upload(filepath, {
      destination: gcsFileName,
    });

    // Wait for logs to become consistent
    await promiseRetry(retry => {
      const logs = childProcess
        .execSync(`${baseCmd} logs read helloGCS --start-time ${startTime}`)
        .toString();

      try {
        assert.ok(logs.includes(`File: ${gcsFileName}`));
        assert.ok(logs.includes('Event Type:'));
      } catch (err) {
        console.log('An error occurred, retrying:', err);
        // Signal promise-retry to run the check again with backoff.
        retry(err);
      }
    });
  });
});

from datetime import datetime
from os import getenv, path
import subprocess
import time
import uuid

from google.cloud import storage
import pytest

# Both variables must be set for the system test to run; fail fast otherwise.
PROJECT = getenv('GCP_PROJECT')
BUCKET = getenv('BUCKET')

assert PROJECT is not None
assert BUCKET is not None

@pytest.fixture(scope='module')
def storage_client():
    # Yield a Cloud Storage client shared by all tests in this module.
    yield storage.Client()

@pytest.fixture(scope='module')
def bucket_object(storage_client):
    # Yield a handle to the bucket named by the BUCKET environment variable.
    bucket_object = storage_client.get_bucket(BUCKET)
    yield bucket_object

@pytest.fixture(scope='module')
def uploaded_file(bucket_object):
    # Upload a uniquely named test file and yield its object name; the blob
    # is deleted after the test so the bucket does not accumulate garbage.
    name = 'test-{}.txt'.format(str(uuid.uuid4()))
    blob = bucket_object.blob(name)

    test_dir = path.dirname(path.abspath(__file__))
    blob.upload_from_filename(path.join(test_dir, 'test.txt'))
    yield name
    blob.delete()

def test_hello_gcs(uploaded_file):
    """Check that the deployed function logged the uploaded file's name."""
    start_time = datetime.utcnow().isoformat()
    time.sleep(10)  # Wait for logs to become consistent

    # NOTE(review): the gcloud argument list was lost in extraction; this
    # reads the deployed function's logs -- confirm the deployed name.
    log_process = subprocess.Popen([
        'gcloud',
        'alpha',
        'functions',
        'logs',
        'read',
        'hello_gcs',
        '--start-time',
        start_time,
    ], stdout=subprocess.PIPE)
    logs = str(log_process.communicate()[0])
    assert uploaded_file in logs

What's next

To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.