Test a chat prompt using a publisher chat model.
Code samples
C#
Before trying this sample, follow the C# setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI C# API reference documentation.
To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
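For local development, Application Default Credentials are typically created with the gcloud CLI. As a general-purpose example (not specific to this sample), the following command opens a browser sign-in flow and stores local credentials:

gcloud auth application-default login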
using Google.Cloud.AIPlatform.V1;
using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.Linq;
using Value = Google.Protobuf.WellKnownTypes.Value;

public class PredictChatPromptSample
{
    public string PredictChatPrompt(
        string projectId = "your-project-id",
        string locationId = "us-central1",
        string publisher = "google",
        string model = "chat-bison@001"
    )
    {
        // Initialize client that will be used to send requests.
        // This client only needs to be created once,
        // and can be reused for multiple requests.
        var client = new PredictionServiceClientBuilder
        {
            Endpoint = $"{locationId}-aiplatform.googleapis.com"
        }.Build();

        // Configure the parent resource.
        var endpoint = EndpointName.FromProjectLocationPublisherModel(projectId, locationId, publisher, model);

        // Initialize request argument(s).
        var prompt = "How many planets are there in the solar system?";

        // You can construct Protobuf from JSON.
        var instanceJson = JsonConvert.SerializeObject(new
        {
            context = "My name is Miles. You are an astronomer, knowledgeable about the solar system.",
            examples = new[]
            {
                new
                {
                    input = new { content = "How many moons does Mars have?" },
                    output = new { content = "The planet Mars has two moons, Phobos and Deimos." }
                }
            },
            messages = new[]
            {
                new
                {
                    author = "user",
                    content = prompt
                }
            }
        });
        var instance = Value.Parser.ParseJson(instanceJson);
        var instances = new List<Value>
        {
            instance
        };

        // You can construct Protobuf from JSON.
        var parametersJson = JsonConvert.SerializeObject(new
        {
            temperature = 0.3,
            maxDecodeSteps = 200,
            topP = 0.8,
            topK = 40
        });
        var parameters = Value.Parser.ParseJson(parametersJson);

        // Make the request.
        var response = client.Predict(endpoint, instances, parameters);

        // Parse the response and return the content.
        var content = response.Predictions.First().StructValue.Fields["candidates"].ListValue.Values[0].StructValue.Fields["content"].StringValue;
        Console.WriteLine($"Content: {content}");
        return content;
    }
}
Java
Before trying this sample, follow the Java setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Java API reference documentation.
To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.cloud.aiplatform.v1beta1.EndpointName;
import com.google.cloud.aiplatform.v1beta1.PredictResponse;
import com.google.cloud.aiplatform.v1beta1.PredictionServiceClient;
import com.google.cloud.aiplatform.v1beta1.PredictionServiceSettings;
import com.google.protobuf.Value;
import com.google.protobuf.util.JsonFormat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Send a Predict request to a large language model to test a chat prompt
public class PredictChatPromptSample {

  public static void main(String[] args) throws IOException {
    // TODO(developer): Replace these variables before running the sample.
    String instance =
        "{\n"
            + " \"context\": \"My name is Ned. You are my personal assistant. My favorite movies"
            + " are Lord of the Rings and Hobbit.\",\n"
            + " \"examples\": [ { \n"
            + " \"input\": {\"content\": \"Who do you work for?\"},\n"
            + " \"output\": {\"content\": \"I work for Ned.\"}\n"
            + " },\n"
            + " { \n"
            + " \"input\": {\"content\": \"What do I like?\"},\n"
            + " \"output\": {\"content\": \"Ned likes watching movies.\"}\n"
            + " }],\n"
            + " \"messages\": [\n"
            + " { \n"
            + " \"author\": \"user\",\n"
            + " \"content\": \"Are my favorite movies based on a book series?\"\n"
            + " }]\n"
            + "}";
    String parameters =
        "{\n"
            + " \"temperature\": 0.3,\n"
            + " \"maxDecodeSteps\": 200,\n"
            + " \"topP\": 0.8,\n"
            + " \"topK\": 40\n"
            + "}";
    String project = "YOUR_PROJECT_ID";
    String publisher = "google";
    String model = "chat-bison@001";

    predictChatPrompt(instance, parameters, project, publisher, model);
  }

  static void predictChatPrompt(
      String instance, String parameters, String project, String publisher, String model)
      throws IOException {
    PredictionServiceSettings predictionServiceSettings =
        PredictionServiceSettings.newBuilder()
            .setEndpoint("us-central1-aiplatform.googleapis.com:443")
            .build();

    // Initialize client that will be used to send requests. This client only needs to be created
    // once, and can be reused for multiple requests.
    try (PredictionServiceClient predictionServiceClient =
        PredictionServiceClient.create(predictionServiceSettings)) {
      String location = "us-central1";
      final EndpointName endpointName =
          EndpointName.ofProjectLocationPublisherModelName(project, location, publisher, model);

      Value.Builder instanceValue = Value.newBuilder();
      JsonFormat.parser().merge(instance, instanceValue);
      List<Value> instances = new ArrayList<>();
      instances.add(instanceValue.build());

      Value.Builder parameterValueBuilder = Value.newBuilder();
      JsonFormat.parser().merge(parameters, parameterValueBuilder);
      Value parameterValue = parameterValueBuilder.build();

      PredictResponse predictResponse =
          predictionServiceClient.predict(endpointName, instances, parameterValue);
      // Print the prediction response returned by the model.
      System.out.println("Predict Response");
      System.out.println(predictResponse);
    }
  }
}
Node.js
Before trying this sample, follow the Node.js setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Node.js API reference documentation.
To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
 * TODO(developer): Uncomment these variables before running the sample.
 * (Not necessary if passing values as arguments)
 */
// const project = 'YOUR_PROJECT_ID';
// const location = 'YOUR_PROJECT_LOCATION';
const aiplatform = require('@google-cloud/aiplatform');

// Imports the Google Cloud Prediction service client
const {PredictionServiceClient} = aiplatform.v1;

// Import the helper module for converting arbitrary protobuf.Value objects.
const {helpers} = aiplatform;

// Specifies the location of the api endpoint
const clientOptions = {
  apiEndpoint: 'us-central1-aiplatform.googleapis.com',
};

const publisher = 'google';
const model = 'chat-bison@001';

// Instantiates a client
const predictionServiceClient = new PredictionServiceClient(clientOptions);

async function callPredict() {
  // Configure the parent resource
  const endpoint = `projects/${project}/locations/${location}/publishers/${publisher}/models/${model}`;

  const prompt = {
    context:
      'My name is Miles. You are an astronomer, knowledgeable about the solar system.',
    examples: [
      {
        input: {content: 'How many moons does Mars have?'},
        output: {
          content: 'The planet Mars has two moons, Phobos and Deimos.',
        },
      },
    ],
    messages: [
      {
        author: 'user',
        content: 'How many planets are there in the solar system?',
      },
    ],
  };
  const instanceValue = helpers.toValue(prompt);
  const instances = [instanceValue];

  const parameter = {
    temperature: 0.2,
    maxOutputTokens: 256,
    topP: 0.95,
    topK: 40,
  };
  const parameters = helpers.toValue(parameter);

  const request = {
    endpoint,
    instances,
    parameters,
  };

  // Predict request
  const [response] = await predictionServiceClient.predict(request);
  console.log('Get chat prompt response');
  const predictions = response.predictions;
  console.log('\tPredictions :');
  for (const prediction of predictions) {
    console.log(`\t\tPrediction : ${JSON.stringify(prediction)}`);
  }
}

callPredict();
Python
Before trying this sample, follow the Python setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Python API reference documentation.
To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
from vertexai.language_models import ChatModel, InputOutputTextPair


def science_tutoring(temperature: float = 0.2):
    """Send a chat prompt to the chat-bison model and print the response."""
    chat_model = ChatModel.from_pretrained("chat-bison@001")

    # TODO developer - override these parameters as needed:
    parameters = {
        "temperature": temperature,  # Temperature controls the degree of randomness in token selection.
        "max_output_tokens": 256,  # Token limit determines the maximum amount of text output.
        "top_p": 0.95,  # Tokens are selected from most probable to least until the sum of their probabilities equals the top_p value.
        "top_k": 40,  # A top_k of 1 means the selected token is the most probable among all tokens.
    }

    chat = chat_model.start_chat(
        context="My name is Miles. You are an astronomer, knowledgeable about the solar system.",
        examples=[
            InputOutputTextPair(
                input_text="How many moons does Mars have?",
                output_text="The planet Mars has two moons, Phobos and Deimos.",
            ),
        ],
    )

    response = chat.send_message(
        "How many planets are there in the solar system?", **parameters
    )
    print(f"Response from Model: {response.text}")

    return response
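The function above assumes the Vertex AI SDK has already been initialized. A minimal sketch for running it locally, where the project ID and location are placeholder values you would replace with your own:

import vertexai

# Placeholder values -- replace with your own project ID and region.
vertexai.init(project="your-project-id", location="us-central1")

science_tutoring(temperature=0.2)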
What's next
To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.