使用 Gemini 模型进行批量文本预测
使用集合让一切井井有条
根据您的偏好保存内容并对其进行分类。
使用 Gemini 模型执行批量文本预测,并返回输出位置。
深入探索
如需查看包含此代码示例的详细文档,请参阅以下内容:
代码示例
如未另行说明,那么本页面中的内容已根据知识共享署名 4.0 许可获得了许可,并且代码示例已根据 Apache 2.0 许可获得了许可。有关详情,请参阅 Google 开发者网站政策。Java 是 Oracle 和/或其关联公司的注册商标。
[[["易于理解","easyToUnderstand","thumb-up"],["解决了我的问题","solvedMyProblem","thumb-up"],["其他","otherUp","thumb-up"]],[["很难理解","hardToUnderstand","thumb-down"],["信息或示例代码不正确","incorrectInformationOrSampleCode","thumb-down"],["没有我需要的信息/示例","missingTheInformationSamplesINeed","thumb-down"],["翻译问题","translationIssue","thumb-down"],["其他","otherDown","thumb-down"]],[],[],[],null,["# Batch text prediction with Gemini model\n\nPerform batch text prediction using Gemini model and returns the output location.\n\nExplore further\n---------------\n\n\nFor detailed documentation that includes this code sample, see the following:\n\n- [Get batch predictions for Gemini](/vertex-ai/generative-ai/docs/model-reference/batch-prediction-api)\n\nCode sample\n-----------\n\n### Go\n\n\nBefore trying this sample, follow the Go setup instructions in the\n[Vertex AI quickstart using\nclient libraries](/vertex-ai/docs/start/client-libraries).\n\n\nFor more information, see the\n[Vertex AI Go API\nreference documentation](/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1).\n\n\nTo authenticate to Vertex AI, set up Application Default Credentials.\nFor more information, see\n\n[Set up authentication for a local development environment](/docs/authentication/set-up-adc-local-dev-environment).\n\n import (\n \t\"context\"\n \t\"fmt\"\n \t\"io\"\n \t\"time\"\n\n \taiplatform \"cloud.google.com/go/aiplatform/apiv1\"\n \taiplatformpb \"cloud.google.com/go/aiplatform/apiv1/aiplatformpb\"\n\n \t\"google.golang.org/api/option\"\n \t\"google.golang.org/protobuf/types/known/structpb\"\n )\n\n // batchPredictGCS submits a batch prediction job using GCS data source as its input\n func batchPredictGCS(w io.Writer, projectID, location string, inputURIs []string, outputURI string) error {\n \t// location := \"us-central1\"\n \t// inputURIs := []string{\"gs://cloud-samples-data/batch/prompt_for_batch_gemini_predict.jsonl\"}\n \t// outputURI := 
"gs://\u003ccloud-bucket-name\u003e/\u003cprefix-name\u003e\"\n \tmodelName := \"gemini-2.0-flash-001\"\n \tjobName := \"batch-predict-gcs-test-001\"\n\n \tctx := context.Background()\n \tapiEndpoint := fmt.Sprintf(\"%s-aiplatform.googleapis.com:443\", location)\n \tclient, err := aiplatform.NewJobClient(ctx, option.WithEndpoint(apiEndpoint))\n \tif err != nil {\n \t\treturn fmt.Errorf(\"unable to create aiplatform client: %w\", err)\n \t}\n \tdefer client.Close()\n\n \tmodelParameters, err := structpb.NewValue(map[string]interface{}{\n \t\t\"temperature\": 0.2,\n \t\t\"maxOutputTokens\": 200,\n \t})\n \tif err != nil {\n \t\treturn fmt.Errorf(\"unable to convert model parameters to protobuf value: %w\", err)\n \t}\n\n \treq := &aiplatformpb.CreateBatchPredictionJobRequest{\n \t\tParent: fmt.Sprintf(\"projects/%s/locations/%s\", projectID, location),\n \t\tBatchPredictionJob: &aiplatformpb.BatchPredictionJob{\n \t\t\tDisplayName: jobName,\n \t\t\tModel: fmt.Sprintf(\"publishers/google/models/%s\", modelName),\n \t\t\tModelParameters: modelParameters,\n \t\t\t// Check the API reference for `BatchPredictionJob` for supported input and output formats:\n \t\t\t// https://cloud.google.com/vertex-ai/docs/reference/rpc/google.cloud.aiplatform.v1#google.cloud.aiplatform.v1.BatchPredictionJob\n \t\t\tInputConfig: &aiplatformpb.BatchPredictionJob_InputConfig{\n \t\t\t\tSource: &aiplatformpb.BatchPredictionJob_InputConfig_GcsSource{\n \t\t\t\t\tGcsSource: &aiplatformpb.GcsSource{\n \t\t\t\t\t\tUris: inputURIs,\n \t\t\t\t\t},\n \t\t\t\t},\n \t\t\t\tInstancesFormat: \"jsonl\",\n \t\t\t},\n \t\t\tOutputConfig: &aiplatformpb.BatchPredictionJob_OutputConfig{\n \t\t\t\tDestination: &aiplatformpb.BatchPredictionJob_OutputConfig_GcsDestination{\n \t\t\t\t\tGcsDestination: &aiplatformpb.GcsDestination{\n \t\t\t\t\t\tOutputUriPrefix: outputURI,\n 
\t\t\t\t\t},\n \t\t\t\t},\n \t\t\t\tPredictionsFormat: \"jsonl\",\n \t\t\t},\n \t\t},\n \t}\n\n \tjob, err := client.CreateBatchPredictionJob(ctx, req)\n \tif err != nil {\n \t\treturn err\n \t}\n \tfullJobId := job.GetName()\n \tfmt.Fprintf(w, \"submitted batch predict job for model %q\\n\", job.GetModel())\n \tfmt.Fprintf(w, \"job id: %q\\n\", fullJobId)\n \tfmt.Fprintf(w, \"job state: %s\\n\", job.GetState())\n \t// Example response:\n \t// submitted batch predict job for model \"publishers/google/models/gemini-2.0-flash-001\"\n \t// job id: \"projects/.../locations/.../batchPredictionJobs/1234567890000000000\"\n \t// job state: JOB_STATE_PENDING\n\n \tfor {\n \t\ttime.Sleep(5 * time.Second)\n\n \t\tjob, err := client.GetBatchPredictionJob(ctx, &aiplatformpb.GetBatchPredictionJobRequest{\n \t\t\tName: fullJobId,\n \t\t})\n \t\tif err != nil {\n \t\t\treturn fmt.Errorf(\"error: couldn't get updated job state: %w\", err)\n \t\t}\n\n \t\tif job.GetEndTime() != nil {\n \t\t\tfmt.Fprintf(w, \"batch predict job finished with state %s\\n\", job.GetState())\n \t\t\tbreak\n \t\t} else {\n \t\t\tfmt.Fprintf(w, \"batch predict job is running... job state is %s\\n\", job.GetState())\n \t\t}\n \t}\n\n \treturn nil\n }\n\nWhat's next\n-----------\n\n\nTo search and filter code samples for other Google Cloud products, see the\n[Google Cloud sample browser](/docs/samples?product=generativeaionvertexai)."]]