Method: projects.locations.publishers.models.streamGenerateContent

Generates content from multimodal inputs and streams the response.

Endpoint

POST https://{service-endpoint}/v1beta1/{model}:streamGenerateContent

Where {service-endpoint} is one of the supported service endpoints.

Path parameters

model string

Required. The fully qualified name of the publisher model or tuned model endpoint to use.

Publisher model format: projects/{project}/locations/{location}/publishers/*/models/*

Tuned model endpoint format: projects/{project}/locations/{location}/endpoints/{endpoint}
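
For illustration, a minimal Python sketch (the project, model, and endpoint IDs below are placeholders, not values from this page) showing how the two path-parameter formats expand into concrete resource names and the streaming request URL:

project = "my-project"  # hypothetical project ID
location = "us-central1"

# Publisher model format
publisher_model = (
    f"projects/{project}/locations/{location}"
    "/publishers/google/models/gemini-1.5-flash-001"
)

# Tuned model endpoint format
tuned_endpoint = f"projects/{project}/locations/{location}/endpoints/1234567890"  # hypothetical endpoint ID

# Resulting request URL (same pattern for either resource name)
url = (
    f"https://{location}-aiplatform.googleapis.com/v1beta1/"
    f"{publisher_model}:streamGenerateContent"
)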

Request body

The request body contains data with the following structure:

Fields
contents[] object (Content)

Required. The content of the current conversation with the model.

For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains the conversation history plus the latest request.
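
As a sketch of a multi-turn request (the conversation below is an assumed example, not taken from this page), the contents field carries the prior turns in order, ending with the latest user message:

import vertexai
from vertexai.generative_models import Content, GenerativeModel, Part

# PROJECT_ID = "your-project-id"
vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel("gemini-1.5-flash-002")

# Conversation history plus the latest request, in order
contents = [
    Content(role="user", parts=[Part.from_text("Why is the sky blue?")]),
    Content(role="model", parts=[Part.from_text("Mostly because of Rayleigh scattering.")]),
    Content(role="user", parts=[Part.from_text("Explain that in one sentence for a child.")]),
]

responses = model.generate_content(contents, stream=True)
for response in responses:
    print(response.text, end="")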

cachedContent string

Optional. The name of the cached content used as context to serve the prediction. Note: this is only used with explicit caching, where users control what content is cached and benefit from guaranteed cost savings. Format: projects/{project}/locations/{location}/cachedContents/{cachedContent}
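
A minimal Python sketch of pointing a request at explicitly cached content (preview SDK surface; the cached content name is a placeholder for a cache created earlier, e.g. with caching.CachedContent.create):

import vertexai
from vertexai.preview import caching
from vertexai.preview.generative_models import GenerativeModel

# PROJECT_ID = "your-project-id"
vertexai.init(project=PROJECT_ID, location="us-central1")

# Fully qualified name of an existing cache (placeholder values)
cached = caching.CachedContent(
    cached_content_name="projects/PROJECT_NUMBER/locations/us-central1/cachedContents/CACHE_ID"
)

model = GenerativeModel.from_cached_content(cached_content=cached)
responses = model.generate_content("Summarize the cached document.", stream=True)
for response in responses:
    print(response.text, end="")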

tools[] object (Tool)

Optional. A list of Tools the model may use to generate the next response.

A Tool is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model.

toolConfig object (ToolConfig)

Optional. Tool config. This config is shared for all tools provided in the request.

labels map (key: string, value: string)

Optional. The labels with user-defined metadata for the request. It is used for billing and reporting only.

Label keys and values can be no longer than 63 characters (Unicode code points) and can contain only lowercase letters, numeric characters, underscores, and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter.
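
A hedged sketch of a raw REST call that attaches labels to the request body (the label keys and values are made-up examples that follow the constraints above; assumes Application Default Credentials are configured):

import google.auth
import google.auth.transport.requests
import requests

credentials, project = google.auth.default(
    scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
credentials.refresh(google.auth.transport.requests.Request())

location = "us-central1"
model = f"projects/{project}/locations/{location}/publishers/google/models/gemini-1.5-flash-001"
url = f"https://{location}-aiplatform.googleapis.com/v1beta1/{model}:streamGenerateContent"

body = {
    "contents": [{"role": "user", "parts": [{"text": "Why is the sky blue?"}]}],
    # Hypothetical labels used for billing and reporting only
    "labels": {"team": "research", "cost-center": "ml-experiments"},
}

response = requests.post(
    url,
    json=body,
    headers={"Authorization": f"Bearer {credentials.token}"},
)
print(response.text)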

safetySettings[] object (SafetySetting)

Optional. Per-request settings for blocking unsafe content. Enforced on GenerateContentResponse.candidates.

generationConfig object (GenerationConfig)

Optional. Generation config.

systemInstruction object (Content)

Optional. The user-provided system instructions for the model. Note: only text should be used in parts, and the content in each part will be placed in a separate paragraph.
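
To tie the optional fields above together, a minimal Python sketch (the prompt, instruction, and settings are illustrative) that sets a system instruction, per-request safety settings, and a generation config on a streaming call:

import vertexai
from vertexai.generative_models import (
    GenerationConfig,
    GenerativeModel,
    HarmBlockThreshold,
    HarmCategory,
    SafetySetting,
)

# PROJECT_ID = "your-project-id"
vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel(
    "gemini-1.5-flash-002",
    system_instruction=["You are a concise assistant. Answer in one short paragraph."],
)

responses = model.generate_content(
    "Why is the sky blue?",
    generation_config=GenerationConfig(temperature=0.2, max_output_tokens=256),
    safety_settings=[
        SafetySetting(
            category=HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
            threshold=HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        )
    ],
    stream=True,
)

for response in responses:
    print(response.text, end="")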

Example request

Grounding: Public data

Java

import com.google.cloud.vertexai.VertexAI;
import com.google.cloud.vertexai.api.GenerateContentResponse;
import com.google.cloud.vertexai.api.GoogleSearchRetrieval;
import com.google.cloud.vertexai.api.GroundingMetadata;
import com.google.cloud.vertexai.api.Tool;
import com.google.cloud.vertexai.generativeai.GenerativeModel;
import com.google.cloud.vertexai.generativeai.ResponseHandler;
import java.io.IOException;
import java.util.Collections;

public class GroundingWithPublicData {
  public static void main(String[] args) throws IOException {
    // TODO(developer): Replace these variables before running the sample.
    String projectId = "your-google-cloud-project-id";
    String location = "us-central1";
    String modelName = "gemini-1.5-flash-001";

    groundWithPublicData(projectId, location, modelName);
  }

  // A request whose response will be "grounded" with information found in Google Search.
  public static String groundWithPublicData(String projectId, String location, String modelName)
      throws IOException {
    // Initialize client that will be used to send requests.
    // This client only needs to be created once, and can be reused for multiple requests.
    try (VertexAI vertexAI = new VertexAI(projectId, location)) {
      Tool googleSearchTool =
          Tool.newBuilder()
              .setGoogleSearchRetrieval(
                  // Enable using the result from this tool in detecting grounding
                  GoogleSearchRetrieval.newBuilder())
              .build();

      GenerativeModel model =
          new GenerativeModel(modelName, vertexAI)
              .withTools(Collections.singletonList(googleSearchTool));

      GenerateContentResponse response = model.generateContent("Why is the sky blue?");

      GroundingMetadata groundingMetadata = response.getCandidates(0).getGroundingMetadata();
      String answer = ResponseHandler.getText(response);

      System.out.println("Answer: " + answer);
      System.out.println("Grounding metadata: " + groundingMetadata);

      return answer;
    }
  }
}

Node.js

const {VertexAI} = require('@google-cloud/vertexai');

/**
 * TODO(developer): Update these variables before running the sample.
 */
async function generateContentWithGoogleSearchGrounding(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-1.5-flash-001'
) {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  const generativeModelPreview = vertexAI.preview.getGenerativeModel({
    model: model,
    generationConfig: {maxOutputTokens: 256},
  });

  const googleSearchRetrievalTool = {
    googleSearchRetrieval: {},
  };

  const request = {
    contents: [{role: 'user', parts: [{text: 'Why is the sky blue?'}]}],
    tools: [googleSearchRetrievalTool],
  };

  const result = await generativeModelPreview.generateContent(request);
  const response = await result.response;
  const groundingMetadata = response.candidates[0].groundingMetadata;
  console.log(
    'Response: ',
    JSON.stringify(response.candidates[0].content.parts[0].text)
  );
  console.log('GroundingMetadata is: ', JSON.stringify(groundingMetadata));
}

Grounding: Private data

Java

import com.google.cloud.vertexai.VertexAI;
import com.google.cloud.vertexai.api.GenerateContentResponse;
import com.google.cloud.vertexai.api.GroundingMetadata;
import com.google.cloud.vertexai.api.Retrieval;
import com.google.cloud.vertexai.api.Tool;
import com.google.cloud.vertexai.api.VertexAISearch;
import com.google.cloud.vertexai.generativeai.GenerativeModel;
import com.google.cloud.vertexai.generativeai.ResponseHandler;
import java.io.IOException;
import java.util.Collections;

public class GroundingWithPrivateData {
  public static void main(String[] args) throws IOException {
    // TODO(developer): Replace these variables before running the sample.
    String projectId = "your-google-cloud-project-id";
    String location = "us-central1";
    String modelName = "gemini-1.5-flash-001";
    String datastore = String.format(
        "projects/%s/locations/global/collections/default_collection/dataStores/%s",
        projectId, "datastore_id");

    groundWithPrivateData(projectId, location, modelName, datastore);
  }

  // A request whose response will be "grounded"
  // with information found in Vertex AI Search datastores.
  public static String groundWithPrivateData(String projectId, String location, String modelName,
                                             String datastoreId)
      throws IOException {
    // Initialize client that will be used to send requests.
    // This client only needs to be created once, and can be reused for multiple requests.
    try (VertexAI vertexAI = new VertexAI(projectId, location)) {
      Tool datastoreTool = Tool.newBuilder()
          .setRetrieval(
              Retrieval.newBuilder()
                  .setVertexAiSearch(VertexAISearch.newBuilder().setDatastore(datastoreId))
                  .setDisableAttribution(false))
          .build();

      GenerativeModel model = new GenerativeModel(modelName, vertexAI).withTools(
          Collections.singletonList(datastoreTool)
      );

      GenerateContentResponse response = model.generateContent(
          "How do I make an appointment to renew my driver's license?");

      GroundingMetadata groundingMetadata = response.getCandidates(0).getGroundingMetadata();
      String answer = ResponseHandler.getText(response);

      System.out.println("Answer: " + answer);
      System.out.println("Grounding metadata: " + groundingMetadata);

      return answer;
    }
  }
}

Node.js

const {
  VertexAI,
  HarmCategory,
  HarmBlockThreshold,
} = require('@google-cloud/vertexai');

/**
 * TODO(developer): Update these variables before running the sample.
 */
async function generateContentWithVertexAISearchGrounding(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-1.5-flash-001',
  dataStoreId = 'DATASTORE_ID'
) {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  const generativeModelPreview = vertexAI.preview.getGenerativeModel({
    model: model,
    // The following parameters are optional
    // They can also be passed to individual content generation requests
    safetySettings: [
      {
        category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
        threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
      },
    ],
    generationConfig: {maxOutputTokens: 256},
  });

  const vertexAIRetrievalTool = {
    retrieval: {
      vertexAiSearch: {
        datastore: `projects/${projectId}/locations/global/collections/default_collection/dataStores/${dataStoreId}`,
      },
      disableAttribution: false,
    },
  };

  const request = {
    contents: [{role: 'user', parts: [{text: 'Why is the sky blue?'}]}],
    tools: [vertexAIRetrievalTool],
  };

  const result = await generativeModelPreview.generateContent(request);
  const response = result.response;
  const groundingMetadata = response.candidates[0].groundingMetadata;
  console.log('Response: ', JSON.stringify(response.candidates[0]));
  console.log('GroundingMetadata is: ', JSON.stringify(groundingMetadata));
}

Grounding: Vertex AI Search

C#


using Google.Cloud.AIPlatform.V1;
using System;
using System.Threading.Tasks;

public class GroundingVertexAiSearchSample
{
    public async Task<string> GenerateTextWithVertexAiSearch(
        string projectId = "your-project-id",
        string location = "us-central1",
        string publisher = "google",
        string model = "gemini-1.5-flash-001",
        string dataStoreLocation = "global",
        string dataStoreId = "your-datastore-id")
    {
        var predictionServiceClient = new PredictionServiceClientBuilder
        {
            Endpoint = $"{location}-aiplatform.googleapis.com"
        }.Build();

        var generateContentRequest = new GenerateContentRequest
        {
            Model = $"projects/{projectId}/locations/{location}/publishers/{publisher}/models/{model}",
            GenerationConfig = new GenerationConfig
            {
                Temperature = 0.0f
            },
            Contents =
            {
                new Content
                {
                    Role = "USER",
                    Parts = { new Part { Text = "How do I make an appointment to renew my driver's license?" } }
                }
            },
            Tools =
            {
                new Tool
                {
                    Retrieval = new Retrieval
                    {
                        VertexAiSearch = new VertexAISearch
                        {
                            Datastore = $"projects/{projectId}/locations/{dataStoreLocation}/collections/default_collection/dataStores/{dataStoreId}"
                        }
                    }
                }
            }
        };

        GenerateContentResponse response = await predictionServiceClient.GenerateContentAsync(generateContentRequest);

        string responseText = response.Candidates[0].Content.Parts[0].Text;
        Console.WriteLine(responseText);

        return responseText;
    }
}

Python

import vertexai

from vertexai.preview.generative_models import (
    GenerationConfig,
    GenerativeModel,
    Tool,
    grounding,
)

# TODO(developer): Update and un-comment below lines
# PROJECT_ID = "your-project-id"
# data_store_id = "your-data-store-id"

vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel("gemini-1.5-flash-001")

tool = Tool.from_retrieval(
    grounding.Retrieval(
        grounding.VertexAISearch(
            datastore=data_store_id,
            project=PROJECT_ID,
            location="global",
        )
    )
)

prompt = "How do I make an appointment to renew my driver's license?"
response = model.generate_content(
    prompt,
    tools=[tool],
    generation_config=GenerationConfig(
        temperature=0.0,
    ),
)

print(response.text)

Grounding: Web

C#


using Google.Cloud.AIPlatform.V1;
using System;
using System.Threading.Tasks;

public class GroundingWebSample
{
    public async Task<string> GenerateTextWithGroundingWeb(
        string projectId = "your-project-id",
        string location = "us-central1",
        string publisher = "google",
        string model = "gemini-1.5-flash-001"
    )
    {
        var predictionServiceClient = new PredictionServiceClientBuilder
        {
            Endpoint = $"{location}-aiplatform.googleapis.com"
        }.Build();


        var generateContentRequest = new GenerateContentRequest
        {
            Model = $"projects/{projectId}/locations/{location}/publishers/{publisher}/models/{model}",
            GenerationConfig = new GenerationConfig
            {
                Temperature = 0.0f
            },
            Contents =
            {
                new Content
                {
                    Role = "USER",
                    Parts = { new Part { Text = "When is the next total solar eclipse in US?" } }
                }
            },
            Tools =
            {
                new Tool
                {
                    GoogleSearchRetrieval = new GoogleSearchRetrieval()
                }
            }
        };

        GenerateContentResponse response = await predictionServiceClient.GenerateContentAsync(generateContentRequest);

        string responseText = response.Candidates[0].Content.Parts[0].Text;
        Console.WriteLine(responseText);

        return responseText;
    }
}

Python

import vertexai

from vertexai.generative_models import (
    GenerationConfig,
    GenerativeModel,
    Tool,
    grounding,
)

# TODO(developer): Update and un-comment below line
# PROJECT_ID = "your-project-id"
vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel("gemini-1.5-flash-001")

# Use Google Search for grounding
tool = Tool.from_google_search_retrieval(grounding.GoogleSearchRetrieval())

prompt = "When is the next total solar eclipse in US?"
response = model.generate_content(
    prompt,
    tools=[tool],
    generation_config=GenerationConfig(
        temperature=0.0,
    ),
)

print(response.text)
# Example response:
# The next total solar eclipse visible from the contiguous United States will be on **August 23, 2044**.

Text

Go

import (
	"context"
	"errors"
	"fmt"
	"io"

	"cloud.google.com/go/vertexai/genai"
	"google.golang.org/api/iterator"
)

// generateContent shows how to send a basic streaming text prompt, writing
// the response to the provided io.Writer.
func generateContent(w io.Writer, projectID, modelName string) error {
	ctx := context.Background()

	client, err := genai.NewClient(ctx, projectID, "us-central1")
	if err != nil {
		return fmt.Errorf("unable to create client: %w", err)
	}
	defer client.Close()

	model := client.GenerativeModel(modelName)

	iter := model.GenerateContentStream(
		ctx,
		genai.Text("Write a story about a magic backpack."),
	)
	for {
		resp, err := iter.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		if len(resp.Candidates) == 0 || len(resp.Candidates[0].Content.Parts) == 0 {
			return errors.New("empty response from model")
		}
		fmt.Fprint(w, "generated response: ")
		for _, c := range resp.Candidates {
			for _, p := range c.Content.Parts {
				fmt.Fprintf(w, "%s ", p)
			}
		}
	}
}

Java

import com.google.cloud.vertexai.VertexAI;
import com.google.cloud.vertexai.generativeai.GenerativeModel;

public class StreamingQuestionAnswer {

  public static void main(String[] args) throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String projectId = "your-google-cloud-project-id";
    String location = "us-central1";
    String modelName = "gemini-1.5-flash-001";

    streamingQuestion(projectId, location, modelName);
  }

  // Ask a simple question and get the response via streaming.
  public static void streamingQuestion(String projectId, String location, String modelName)
      throws Exception {
    // Initialize client that will be used to send requests.
    // This client only needs to be created once, and can be reused for multiple requests.
    try (VertexAI vertexAI = new VertexAI(projectId, location)) {
      GenerativeModel model = new GenerativeModel(modelName, vertexAI);

      // Stream the result.
      model.generateContentStream("Write a story about a magic backpack.")
          .stream()
          .forEach(System.out::println);

      System.out.println("Streaming complete.");
    }
  }
}

Node.js

const {VertexAI} = require('@google-cloud/vertexai');

/**
 * TODO(developer): Update these variables before running the sample.
 */
const PROJECT_ID = process.env.CAIP_PROJECT_ID;
const LOCATION = process.env.LOCATION;
const MODEL = 'gemini-1.5-flash-001';

async function generateContent() {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: PROJECT_ID, location: LOCATION});

  // Instantiate the model
  const generativeModel = vertexAI.getGenerativeModel({
    model: MODEL,
  });

  const request = {
    contents: [
      {
        role: 'user',
        parts: [
          {
            text: 'Write a story about a magic backpack.',
          },
        ],
      },
    ],
  };

  console.log(JSON.stringify(request));

  const result = await generativeModel.generateContentStream(request);
  for await (const item of result.stream) {
    console.log(item.candidates[0].content.parts[0].text);
  }
}

Python

import vertexai

from vertexai.generative_models import GenerativeModel

# TODO(developer): Update and un-comment below line
# PROJECT_ID = "your-project-id"
vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel("gemini-1.5-flash-002")
responses = model.generate_content(
    "Write a story about a magic backpack.", stream=True
)

for response in responses:
    print(response.text)
# Example response:
# El
# ara wasn't looking for magic. She was looking for rent money.
# Her tiny apartment, perched precariously on the edge of Whispering Woods,
# ...

Multimodal

Go

import (
	"context"
	"errors"
	"fmt"
	"io"

	"cloud.google.com/go/vertexai/genai"
	"google.golang.org/api/iterator"
)

func generateContent(w io.Writer, projectID, modelName string) error {
	ctx := context.Background()

	client, err := genai.NewClient(ctx, projectID, "us-central1")
	if err != nil {
		return fmt.Errorf("unable to create client: %w", err)
	}
	defer client.Close()

	model := client.GenerativeModel(modelName)
	iter := model.GenerateContentStream(
		ctx,
		genai.FileData{
			MIMEType: "video/mp4",
			FileURI:  "gs://cloud-samples-data/generative-ai/video/animals.mp4",
		},
		genai.FileData{
			MIMEType: "video/jpeg",
			FileURI:  "gs://cloud-samples-data/generative-ai/image/character.jpg",
		},
		genai.Text("Are these video and image correlated?"),
	)
	for {
		resp, err := iter.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		if len(resp.Candidates) == 0 || len(resp.Candidates[0].Content.Parts) == 0 {
			return errors.New("empty response from model")
		}

		fmt.Fprint(w, "generated response: ")
		for _, c := range resp.Candidates {
			for _, p := range c.Content.Parts {
				fmt.Fprintf(w, "%s ", p)
			}
		}
		fmt.Fprint(w, "\n")
	}
}

Java

import com.google.cloud.vertexai.VertexAI;
import com.google.cloud.vertexai.generativeai.ContentMaker;
import com.google.cloud.vertexai.generativeai.GenerativeModel;
import com.google.cloud.vertexai.generativeai.PartMaker;

public class StreamingMultimodal {
  public static void main(String[] args) throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String projectId = "your-google-cloud-project-id";
    String location = "us-central1";
    String modelName = "gemini-1.5-flash-001";

    streamingMultimodal(projectId, location, modelName);
  }

  // Send a multimodal prompt (video, image, and text) and stream the response.
  public static void streamingMultimodal(String projectId, String location, String modelName)
      throws Exception {
    // Initialize client that will be used to send requests.
    // This client only needs to be created once, and can be reused for multiple requests.
    try (VertexAI vertexAI = new VertexAI(projectId, location)) {
      GenerativeModel model = new GenerativeModel(modelName, vertexAI);

      String videoUri = "gs://cloud-samples-data/video/animals.mp4";
      String imgUri = "gs://cloud-samples-data/generative-ai/image/character.jpg";

      // Stream the result.
      model.generateContentStream(
          ContentMaker.fromMultiModalData(
              PartMaker.fromMimeTypeAndData("video/mp4", videoUri),
              PartMaker.fromMimeTypeAndData("image/jpeg", imgUri),
              "Are this video and image correlated?"
          ))
          .stream()
          .forEach(System.out::println);
    }
  }
}

Node.js

const {VertexAI} = require('@google-cloud/vertexai');

/**
 * TODO(developer): Update these variables before running the sample.
 */
const PROJECT_ID = process.env.CAIP_PROJECT_ID;
const LOCATION = process.env.LOCATION;
const MODEL = 'gemini-1.5-flash-001';

async function generateContent() {
  // Initialize Vertex AI
  const vertexAI = new VertexAI({project: PROJECT_ID, location: LOCATION});
  const generativeModel = vertexAI.getGenerativeModel({model: MODEL});

  const request = {
    contents: [
      {
        role: 'user',
        parts: [
          {
            file_data: {
              file_uri: 'gs://cloud-samples-data/video/animals.mp4',
              mime_type: 'video/mp4',
            },
          },
          {
            file_data: {
              file_uri:
                'gs://cloud-samples-data/generative-ai/image/character.jpg',
              mime_type: 'image/jpeg',
            },
          },
          {text: 'Are these video and image correlated?'},
        ],
      },
    ],
  };

  const result = await generativeModel.generateContentStream(request);

  for await (const item of result.stream) {
    console.log(item.candidates[0].content.parts[0].text);
  }
}

Python

import vertexai

from vertexai.generative_models import GenerativeModel, Part

# TODO(developer): Update & un-comment the lines below
# PROJECT_ID = "your-project-id"

vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel("gemini-1.5-flash-002")
responses = model.generate_content(
    [
        Part.from_uri(
            "gs://cloud-samples-data/generative-ai/video/animals.mp4", "video/mp4"
        ),
        Part.from_uri(
            "gs://cloud-samples-data/generative-ai/image/character.jpg",
            "image/jpeg",
        ),
        "Are these video and image correlated?",
    ],
    stream=True,
)

for response in responses:
    print(response)

Chat

Node.js

const {VertexAI} = require('@google-cloud/vertexai');

/**
 * TODO(developer): Update these variables before running the sample.
 */
async function createStreamChat(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-1.5-flash-001'
) {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  // Instantiate the model
  const generativeModel = vertexAI.getGenerativeModel({
    model: model,
  });

  const chat = generativeModel.startChat({});
  const chatInput1 = 'How can I learn more about that?';

  console.log(`User: ${chatInput1}`);

  const result1 = await chat.sendMessageStream(chatInput1);
  for await (const item of result1.stream) {
    console.log(item.candidates[0].content.parts[0].text);
  }
}

Python

import vertexai

from vertexai.generative_models import GenerativeModel, ChatSession

# TODO(developer): Update and un-comment below line
# PROJECT_ID = "your-project-id"
vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel("gemini-1.5-flash-002")

chat_session = model.start_chat()

def get_chat_response(chat: ChatSession, prompt: str) -> str:
    text_response = []
    responses = chat.send_message(prompt, stream=True)
    for chunk in responses:
        text_response.append(chunk.text)
    return "".join(text_response)

prompt = "Hello."
print(get_chat_response(chat_session, prompt))
# Example response:
# Hello there! How can I help you today?

prompt = "What are all the colors in a rainbow?"
print(get_chat_response(chat_session, prompt))
# Example response:
# The colors in a rainbow are often remembered using the acronym ROY G. BIV:
# * **Red**
# * **Orange** ...

prompt = "Why does it appear when it rains?"
print(get_chat_response(chat_session, prompt))
# Example response:
# It's important to note that these colors blend smoothly into each other, ...

Function calling (basic)

C#


using Google.Cloud.AIPlatform.V1;
using System;
using System.Threading.Tasks;
using Type = Google.Cloud.AIPlatform.V1.Type;
using Value = Google.Protobuf.WellKnownTypes.Value;

public class FunctionCalling
{
    public async Task<string> GenerateFunctionCall(
        string projectId = "your-project-id",
        string location = "us-central1",
        string publisher = "google",
        string model = "gemini-1.5-flash-001")
    {
        var predictionServiceClient = new PredictionServiceClientBuilder
        {
            Endpoint = $"{location}-aiplatform.googleapis.com"
        }.Build();

        // Define the user's prompt in a Content object that we can reuse in
        // model calls
        var userPromptContent = new Content
        {
            Role = "USER",
            Parts =
            {
                new Part { Text = "What is the weather like in Boston?" }
            }
        };

        // Specify a function declaration and parameters for an API request
        var functionName = "get_current_weather";
        var getCurrentWeatherFunc = new FunctionDeclaration
        {
            Name = functionName,
            Description = "Get the current weather in a given location",
            Parameters = new OpenApiSchema
            {
                Type = Type.Object,
                Properties =
                {
                    ["location"] = new()
                    {
                        Type = Type.String,
                        Description = "Get the current weather in a given location"
                    },
                    ["unit"] = new()
                    {
                        Type = Type.String,
                        Description = "The unit of measurement for the temperature",
                        Enum = {"celsius", "fahrenheit"}
                    }
                },
                Required = { "location" }
            }
        };

        // Send the prompt and instruct the model to generate content using the tool that you just created
        var generateContentRequest = new GenerateContentRequest
        {
            Model = $"projects/{projectId}/locations/{location}/publishers/{publisher}/models/{model}",
            GenerationConfig = new GenerationConfig
            {
                Temperature = 0f
            },
            Contents =
            {
                userPromptContent
            },
            Tools =
            {
                new Tool
                {
                    FunctionDeclarations = { getCurrentWeatherFunc }
                }
            }
        };

        GenerateContentResponse response = await predictionServiceClient.GenerateContentAsync(generateContentRequest);

        var functionCall = response.Candidates[0].Content.Parts[0].FunctionCall;
        Console.WriteLine(functionCall);

        string apiResponse = "";

        // Check the function name that the model responded with, and make an API call to an external system
        if (functionCall.Name == functionName)
        {
            // Extract the arguments to use in your API call
            string locationCity = functionCall.Args.Fields["location"].StringValue;

            // Here you can use your preferred method to make an API request to
            // fetch the current weather

            // In this example, we'll use synthetic data to simulate a response
            // payload from an external API
            apiResponse = @"{ ""location"": ""Boston, MA"",
                    ""temperature"": 38, ""description"": ""Partly Cloudy""}";
        }

        // Return the API response to Gemini so it can generate a model response or request another function call
        generateContentRequest = new GenerateContentRequest
        {
            Model = $"projects/{projectId}/locations/{location}/publishers/{publisher}/models/{model}",
            Contents =
            {
                userPromptContent, // User prompt
                response.Candidates[0].Content, // Model response containing the function call
                new Content
                {
                    Parts =
                    {
                        new Part
                        {
                            FunctionResponse = new()
                            {
                                Name = functionName,
                                Response = new()
                                {
                                    Fields =
                                    {
                                        { "content", new Value { StringValue = apiResponse } }
                                    }
                                }
                            }
                        }
                    }
                }
            },
            Tools =
            {
                new Tool
                {
                    FunctionDeclarations = { getCurrentWeatherFunc }
                }
            }
        };

        response = await predictionServiceClient.GenerateContentAsync(generateContentRequest);

        string responseText = response.Candidates[0].Content.Parts[0].Text;
        Console.WriteLine(responseText);

        return responseText;
    }
}

Go

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"

	"cloud.google.com/go/vertexai/genai"
)

// functionCallsBasic opens a chat session and sends 2 messages to the model:
// - first, to convert a text into a structured function call request
// - second, to convert a structured function call response into natural language.
// Writes output of second call to w.
func functionCallsBasic(w io.Writer, projectID, location, modelName string) error {
	// location := "us-central1"
	// modelName := "gemini-1.5-flash-001"
	ctx := context.Background()
	client, err := genai.NewClient(ctx, projectID, location)
	if err != nil {
		return fmt.Errorf("unable to create client: %w", err)
	}
	defer client.Close()

	model := client.GenerativeModel(modelName)

	// Build an OpenAPI schema, in memory
	params := &genai.Schema{
		Type: genai.TypeObject,
		Properties: map[string]*genai.Schema{
			"location": {
				Type:        genai.TypeString,
				Description: "location",
			},
		},
	}
	fundecl := &genai.FunctionDeclaration{
		Name:        "getCurrentWeather",
		Description: "Get the current weather in a given location",
		Parameters:  params,
	}
	model.Tools = []*genai.Tool{
		{FunctionDeclarations: []*genai.FunctionDeclaration{fundecl}},
	}

	chat := model.StartChat()

	resp, err := chat.SendMessage(ctx, genai.Text("What's the weather like in Boston?"))
	if err != nil {
		return err
	}
	if len(resp.Candidates) == 0