Package genai is a client for the Vertex AI generative models.
Functions
func Ptr
func Ptr[T any](t T) *T
Ptr returns a pointer to its argument. It can be used to initialize pointer fields:
model.Temperature = genai.Ptr[float32](0.1)
func WithREST
func WithREST() option.ClientOption
WithREST is an option that enables REST transport for the client. The default transport (if this option is not provided) is gRPC.
Blob
type Blob struct {
// Required. The IANA standard MIME type of the source data.
MIMEType string
// Required. Raw bytes.
Data []byte
}
Blob contains binary data such as an image. Use [Text] for text.
func ImageData
func ImageData(format string, data []byte) Blob
ImageData is a convenience function for creating an image Blob for input to a model. The format should be the second part of the image's MIME type, after "image/". For example, for a PNG image, pass "png".
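A minimal, hypothetical sketch of using ImageData; the project, location, model name, and file path are placeholders, following the other examples in this package:
package main
import (
	"context"
	"fmt"
	"log"
	"os"
	"cloud.google.com/go/vertexai/genai"
)
func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, "your-project", "some-gcp-location")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	model := client.GenerativeModel("some-multimodal-model")
	// Read a PNG from disk; "png" is the part of the MIME type after "image/".
	data, err := os.ReadFile("image.png")
	if err != nil {
		log.Fatal(err)
	}
	resp, err := model.GenerateContent(ctx, genai.ImageData("png", data), genai.Text("Describe this image."))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Candidates[0].Content.Parts[0])
}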
BlockedError
type BlockedError struct {
// If non-nil, the model's response was blocked.
// Consult the Candidate and SafetyRatings fields for details.
Candidate *Candidate
// If non-nil, there was a problem with the prompt.
PromptFeedback *PromptFeedback
}
A BlockedError indicates that the model's response was blocked. There can be two underlying causes: the prompt, or a candidate response.
func (*BlockedError) Error
func (e *BlockedError) Error() string
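A minimal sketch, assuming a blocked GenerateContent call surfaces a *BlockedError, of inspecting it with errors.As; the project, location, model, and prompt are placeholders:
package main
import (
	"context"
	"errors"
	"fmt"
	"log"
	"cloud.google.com/go/vertexai/genai"
)
func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, "your-project", "some-gcp-location")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	model := client.GenerativeModel("some-model")
	resp, err := model.GenerateContent(ctx, genai.Text("Some prompt."))
	if err != nil {
		var blocked *genai.BlockedError
		if errors.As(err, &blocked) {
			// Either the prompt or a candidate was blocked; check both fields.
			if blocked.PromptFeedback != nil {
				log.Printf("prompt blocked: %v", blocked.PromptFeedback.BlockReason)
			}
			if blocked.Candidate != nil {
				log.Printf("candidate blocked: %v", blocked.Candidate.FinishReason)
			}
			return
		}
		log.Fatal(err)
	}
	fmt.Println(resp.Candidates[0].Content.Parts[0])
}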
BlockedReason
type BlockedReason int32
BlockedReason is an enumeration of the reasons why content may be blocked.
BlockedReasonUnspecified, BlockedReasonSafety, BlockedReasonOther, BlockedReasonBlocklist, BlockedReasonProhibitedContent
const (
// BlockedReasonUnspecified means unspecified blocked reason.
BlockedReasonUnspecified BlockedReason = 0
// BlockedReasonSafety means candidates blocked due to safety.
BlockedReasonSafety BlockedReason = 1
// BlockedReasonOther means candidates blocked due to other reason.
BlockedReasonOther BlockedReason = 2
// BlockedReasonBlocklist means candidates blocked due to the terms which are included from the
// terminology blocklist.
BlockedReasonBlocklist BlockedReason = 3
// BlockedReasonProhibitedContent means candidates blocked due to prohibited content.
BlockedReasonProhibitedContent BlockedReason = 4
)
func (BlockedReason) String
func (v BlockedReason) String() string
Candidate
type Candidate struct {
// Output only. Index of the candidate.
Index int32
// Output only. Content parts of the candidate.
Content *Content
// Output only. The reason why the model stopped generating tokens.
// If empty, the model has not stopped generating the tokens.
FinishReason FinishReason
// Output only. List of ratings for the safety of a response candidate.
//
// There is at most one rating per category.
SafetyRatings []*SafetyRating
// Output only. Describes the reason the model stopped generating tokens in
// more detail. This is only filled when `finish_reason` is set.
FinishMessage string
// Output only. Source attribution of the generated content.
CitationMetadata *CitationMetadata
}
Candidate is a response candidate generated from the model.
func (*Candidate) FunctionCalls
func (c *Candidate) FunctionCalls() []FunctionCall
FunctionCalls returns all the FunctionCall parts in the candidate.
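A brief sketch, not a complete program, of collecting the function calls requested by the model; it assumes res is a *genai.GenerateContentResponse obtained as in the examples below:
for _, cand := range res.Candidates {
	for _, fc := range cand.FunctionCalls() {
		fmt.Printf("model requested %s(%v)\n", fc.Name, fc.Args)
	}
}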
ChatSession
type ChatSession struct {
History []*Content
// contains filtered or unexported fields
}
A ChatSession provides interactive chat.
Example
package main
import (
"context"
"fmt"
"log"
"cloud.google.com/go/vertexai/genai"
"google.golang.org/api/iterator"
)
// Your GCP project
const projectID = "your-project"
// A GCP location like "us-central1"
const location = "some-gcp-location"
// A model name like "gemini-1.0-pro"
const model = "some-model"
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, projectID, location)
if err != nil {
log.Fatal(err)
}
defer client.Close()
model := client.GenerativeModel(model)
cs := model.StartChat()
send := func(msg string) *genai.GenerateContentResponse {
fmt.Printf("== Me: %s\n== Model:\n", msg)
res, err := cs.SendMessage(ctx, genai.Text(msg))
if err != nil {
log.Fatal(err)
}
return res
}
res := send("Can you name some brands of air fryer?")
printResponse(res)
iter := cs.SendMessageStream(ctx, genai.Text("Which one of those do you recommend?"))
for {
res, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
printResponse(res)
}
for i, c := range cs.History {
log.Printf(" %d: %+v", i, c)
}
res = send("Why do you like the Philips?")
printResponse(res)
}
func printResponse(resp *genai.GenerateContentResponse) {
for _, cand := range resp.Candidates {
for _, part := range cand.Content.Parts {
fmt.Println(part)
}
}
fmt.Println("---")
}
func (*ChatSession) SendMessage
func (cs *ChatSession) SendMessage(ctx context.Context, parts ...Part) (*GenerateContentResponse, error)
SendMessage sends a request to the model as part of a chat session.
func (*ChatSession) SendMessageStream
func (cs *ChatSession) SendMessageStream(ctx context.Context, parts ...Part) *GenerateContentResponseIterator
SendMessageStream is like SendMessage, but with a streaming request.
Citation
type Citation struct {
// Output only. Start index into the content.
StartIndex int32
// Output only. End index into the content.
EndIndex int32
// Output only. Url reference of the attribution.
URI string
// Output only. Title of the attribution.
Title string
// Output only. License of the attribution.
License string
// Output only. Publication date of the attribution.
PublicationDate civil.Date
}
Citation contains source attributions for content.
CitationMetadata
type CitationMetadata struct {
// Output only. List of citations.
Citations []*Citation
}
CitationMetadata is a collection of source attributions for a piece of content.
Client
type Client struct {
// contains filtered or unexported fields
}
A Client is a Google Vertex AI client.
func NewClient
func NewClient(ctx context.Context, projectID, location string, opts ...option.ClientOption) (*Client, error)
NewClient creates a new Google Vertex AI client.
Clients should be reused instead of created as needed. The methods of Client are safe for concurrent use by multiple goroutines. projectID is your GCP project; location is a GCP location/region per https://cloud.google.com/vertex-ai/docs/general/locations
You may configure the client by passing in options from the [google.golang.org/api/option] package. You may also use options defined in this package, such as [WithREST].
func (*Client) Close
func (c *Client) Close() error
Close closes the client.
func (*Client) GenerativeModel
func (c *Client) GenerativeModel(name string) *GenerativeModel
GenerativeModel creates a new instance of the named model. name is a string model name like "gemini-1.0-pro". See https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versioning for details on model naming and versioning.
Content
type Content struct {
// Optional. The producer of the content. Must be either 'user' or 'model'.
//
// Useful to set for multi-turn conversations, otherwise can be left blank
// or unset.
Role string
// Required. Ordered `Parts` that constitute a single message. Parts may have
// different IANA MIME types.
Parts []Part
}
Con "contenuti" si intende il tipo di dati strutturati di base che include contenuti in più parti di un messaggio.
Un Content
include un campo role
che designa il produttore del campo Content
e un campo parts
contenente dati in più parti con i contenuti del turno del messaggio.
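A brief sketch, not taken from the package documentation, of seeding a chat with explicit Content values; the "user" and "model" role strings follow the field comment above, and model and ctx are assumed to be set up as in the ChatSession example above:
cs := model.StartChat()
cs.History = []*genai.Content{
	{
		Role:  "user",
		Parts: []genai.Part{genai.Text("My name is Ada and I live in Turin.")},
	},
	{
		Role:  "model",
		Parts: []genai.Part{genai.Text("Nice to meet you, Ada! How can I help?")},
	},
}
res, err := cs.SendMessage(ctx, genai.Text("Where do I live?"))
if err != nil {
	log.Fatal(err)
}
fmt.Println(res.Candidates[0].Content.Parts[0])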
CountTokensResponse
type CountTokensResponse struct {
// The total number of tokens counted across all instances from the request.
TotalTokens int32
// The total number of billable characters counted across all instances from
// the request.
TotalBillableCharacters int32
}
CountTokensResponse is the response message for [PredictionService.CountTokens][google.cloud.aiplatform.v1beta1.PredictionService.CountTokens].
FileData
type FileData struct {
// Required. The IANA standard MIME type of the source data.
MIMEType string
// Required. URI.
FileURI string
}
FileData is URI-based data.
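A brief sketch, with a placeholder Cloud Storage URI, of passing file data by reference rather than as raw bytes; it assumes model and ctx are set up as in the other examples, and that the URI points to data the service can read:
video := genai.FileData{
	MIMEType: "video/mp4",
	FileURI:  "gs://your-bucket/your-video.mp4", // placeholder URI
}
resp, err := model.GenerateContent(ctx, video, genai.Text("Summarize this video."))
if err != nil {
	log.Fatal(err)
}
fmt.Println(resp.Candidates[0].Content.Parts[0])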
FinishReason
type FinishReason int32
FinishReason is the reason why the model stopped generating tokens. If empty, the model has not stopped generating tokens.
FinishReasonUnspecified, FinishReasonStop, FinishReasonMaxTokens, FinishReasonSafety, FinishReasonRecitation, FinishReasonOther, FinishReasonBlocklist, FinishReasonProhibitedContent, FinishReasonSpii
const (
// FinishReasonUnspecified means the finish reason is unspecified.
FinishReasonUnspecified FinishReason = 0
// FinishReasonStop means natural stop point of the model or provided stop sequence.
FinishReasonStop FinishReason = 1
// FinishReasonMaxTokens means the maximum number of tokens as specified in the request was reached.
FinishReasonMaxTokens FinishReason = 2
// FinishReasonSafety means the token generation was stopped as the response was flagged for safety
// reasons. NOTE: When streaming the Candidate.content will be empty if
// content filters blocked the output.
FinishReasonSafety FinishReason = 3
// FinishReasonRecitation means the token generation was stopped as the response was flagged for
// unauthorized citations.
FinishReasonRecitation FinishReason = 4
// FinishReasonOther means all other reasons that stopped the token generation
FinishReasonOther FinishReason = 5
// FinishReasonBlocklist means the token generation was stopped as the response was flagged for the
// terms which are included from the terminology blocklist.
FinishReasonBlocklist FinishReason = 6
// FinishReasonProhibitedContent means the token generation was stopped as the response was flagged for
// the prohibited contents.
FinishReasonProhibitedContent FinishReason = 7
// FinishReasonSpii means the token generation was stopped as the response was flagged for
// Sensitive Personally Identifiable Information (SPII) contents.
FinishReasonSpii FinishReason = 8
)
func (FinishReason) String
func (v FinishReason) String() string
FunctionCall
type FunctionCall struct {
// Required. The name of the function to call.
// Matches [FunctionDeclaration.name].
Name string
// Optional. Required. The function parameters and values in JSON object
// format. See [FunctionDeclaration.parameters] for parameter details.
Args map[string]any
}
FunctionCall is a predicted [FunctionCall] returned from the model that contains a string representing [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values.
FunctionCallingConfig
type FunctionCallingConfig struct {
// Optional. Function calling mode.
Mode FunctionCallingMode
// Optional. Function names to call. Only set when the Mode is ANY. Function
// names should match [FunctionDeclaration.name]. With mode set to ANY, model
// will predict a function call from the set of function names provided.
AllowedFunctionNames []string
}
FunctionCallingConfig holds configuration for function calling.
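A brief sketch, assuming a model configured with a tool declaring a "CurrentWeather" function as in the Tool example further below, of forcing the model to predict a call to that function:
model.ToolConfig = &genai.ToolConfig{
	FunctionCallingConfig: &genai.FunctionCallingConfig{
		// Constrain the model to always predict a function call,
		// restricted to the named function.
		Mode:                 genai.FunctionCallingAny,
		AllowedFunctionNames: []string{"CurrentWeather"},
	},
}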
FunctionCallingMode
type FunctionCallingMode int32
FunctionCallingMode is the mode of function calling.
FunctionCallingUnspecified, FunctionCallingAuto, FunctionCallingAny, FunctionCallingNone
const (
// FunctionCallingUnspecified means unspecified function calling mode. This value should not be used.
FunctionCallingUnspecified FunctionCallingMode = 0
// FunctionCallingAuto means default model behavior, model decides to predict either a function call
// or a natural language response.
FunctionCallingAuto FunctionCallingMode = 1
// FunctionCallingAny means model is constrained to always predicting a function call only.
// If "allowed_function_names" are set, the predicted function call will be
// limited to any one of "allowed_function_names", else the predicted
// function call will be any one of the provided "function_declarations".
FunctionCallingAny FunctionCallingMode = 2
// FunctionCallingNone means model will not predict any function call. Model behavior is same as when
// not passing any function declarations.
FunctionCallingNone FunctionCallingMode = 3
)
func (FunctionCallingMode) String
func (v FunctionCallingMode) String() string
FunctionDeclaration
type FunctionDeclaration struct {
// Required. The name of the function to call.
// Must start with a letter or an underscore.
// Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a
// maximum length of 64.
Name string
// Optional. Description and purpose of the function.
// Model uses it to decide how and whether to call the function.
Description string
// Optional. Describes the parameters to this function in JSON Schema Object
// format. Reflects the Open API 3.03 Parameter Object. string Key: the name
// of the parameter. Parameter names are case sensitive. Schema Value: the
// Schema defining the type used for the parameter. For function with no
// parameters, this can be left unset. Parameter names must start with a
// letter or an underscore and must only contain chars a-z, A-Z, 0-9, or
// underscores with a maximum length of 64. Example with 1 required and 1
// optional parameter: type: OBJECT properties:
//
// param1:
// type: STRING
// param2:
// type: INTEGER
//
// required:
// - param1
Parameters *Schema
// Optional. Describes the output from this function in JSON Schema format.
// Reflects the Open API 3.03 Response Object. The Schema defines the type
// used for the response value of the function.
Response *Schema
}
FunctionDeclaration is a structured representation of a function declaration as defined by the OpenAPI 3.0 specification. Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a Tool by the model and executed by the client.
FunctionResponse
type FunctionResponse struct {
// Required. The name of the function to call.
// Matches [FunctionDeclaration.name] and [FunctionCall.name].
Name string
// Required. The function response in JSON object format.
Response map[string]any
}
FunctionResponse is the result output from a [FunctionCall] that contains a string representing [FunctionDeclaration.name] and a structured JSON object containing any output from the function; it is used as context for the model. It should contain the result of a [FunctionCall] made based on model prediction.
GenerateContentResponse
type GenerateContentResponse struct {
// Output only. Generated candidates.
Candidates []*Candidate
// Output only. Content filter results for a prompt sent in the request.
// Note: Sent only in the first stream chunk.
// Only happens when no candidates were generated due to content violations.
PromptFeedback *PromptFeedback
// Usage metadata about the response(s).
UsageMetadata *UsageMetadata
}
GenerateContentResponse is the response from a call to GenerateContent or GenerateContentStream.
GenerateContentResponseIterator
type GenerateContentResponseIterator struct {
// contains filtered or unexported fields
}
GenerateContentResponseIterator is an iterator over GenerateContentResponse values.
func (*GenerateContentResponseIterator) Next
func (iter *GenerateContentResponseIterator) Next() (*GenerateContentResponse, error)
Next returns the next response.
GenerationConfig
type GenerationConfig struct {
// Optional. Controls the randomness of predictions.
Temperature *float32
// Optional. If specified, nucleus sampling will be used.
TopP *float32
// Optional. If specified, top-k sampling will be used.
TopK *int32
// Optional. Number of candidates to generate.
CandidateCount *int32
// Optional. The maximum number of output tokens to generate per message.
MaxOutputTokens *int32
// Optional. Stop sequences.
StopSequences []string
// Optional. Positive penalties.
PresencePenalty *float32
// Optional. Frequency penalties.
FrequencyPenalty *float32
// Optional. Output response mimetype of the generated candidate text.
// Supported mimetype:
// - `text/plain`: (default) Text output.
// - `application/json`: JSON response in the candidates.
// The model needs to be prompted to output the appropriate response type,
// otherwise the behavior is undefined.
// This is a preview feature.
ResponseMIMEType string
}
GenerationConfig is a generation configuration.
func (*GenerationConfig) SetCandidateCount
func (c *GenerationConfig) SetCandidateCount(x int32)
SetCandidateCount sets the CandidateCount field.
func (*GenerationConfig) SetMaxOutputTokens
func (c *GenerationConfig) SetMaxOutputTokens(x int32)
SetMaxOutputTokens sets the MaxOutputTokens field.
func (*GenerationConfig) SetTemperature
func (c *GenerationConfig) SetTemperature(x float32)
SetTemperature sets the Temperature field.
func (*GenerationConfig) SetTopK
func (c *GenerationConfig) SetTopK(x int32)
SetTopK sets the TopK field.
func (*GenerationConfig) SetTopP
func (c *GenerationConfig) SetTopP(x float32)
SetTopP sets the TopP field.
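Because the numeric fields are pointers, they can also be set directly with [Ptr] instead of the setter methods; a brief sketch, assuming a client as in the other examples:
model := client.GenerativeModel("some-model")
model.GenerationConfig = genai.GenerationConfig{
	Temperature:     genai.Ptr[float32](0.2),
	MaxOutputTokens: genai.Ptr[int32](256),
	StopSequences:   []string{"\n\n"},
}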
GenerativeModel
type GenerativeModel struct {
GenerationConfig
SafetySettings []*SafetySetting
Tools []*Tool
ToolConfig *ToolConfig // configuration for tools
SystemInstruction *Content
// contains filtered or unexported fields
}
GenerativeModel is a model that can generate text. Create one with [Client.GenerativeModel], then configure it by setting the exported fields.
The model holds all the config for a GenerateContentRequest, so the GenerateContent method can use a vararg for the content.
func (*GenerativeModel) CountTokens
func (m *GenerativeModel) CountTokens(ctx context.Context, parts ...Part) (*CountTokensResponse, error)
CountTokens counts the number of tokens in the content.
Example
package main
import (
"context"
"fmt"
"log"
"cloud.google.com/go/vertexai/genai"
)
// Your GCP project
const projectID = "your-project"
// A GCP location like "us-central1"
const location = "some-gcp-location"
// A model name like "gemini-1.0-pro"
const model = "some-model"
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, projectID, location)
if err != nil {
log.Fatal(err)
}
defer client.Close()
model := client.GenerativeModel(model)
resp, err := model.CountTokens(ctx, genai.Text("What kind of fish is this?"))
if err != nil {
log.Fatal(err)
}
fmt.Println("Num tokens:", resp.TotalTokens)
}
func (*GenerativeModel) GenerateContent
func (m *GenerativeModel) GenerateContent(ctx context.Context, parts ...Part) (*GenerateContentResponse, error)
GenerateContent produces a single request and response.
Examples
package main
import (
"context"
"fmt"
"log"
"cloud.google.com/go/vertexai/genai"
)
// Your GCP project
const projectID = "your-project"
// A GCP location like "us-central1"
const location = "some-gcp-location"
// A model name like "gemini-1.0-pro"
const model = "some-model"
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, projectID, location)
if err != nil {
log.Fatal(err)
}
defer client.Close()
model := client.GenerativeModel(model)
model.SetTemperature(0.9)
resp, err := model.GenerateContent(ctx, genai.Text("What is the average size of a swallow?"))
if err != nil {
log.Fatal(err)
}
printResponse(resp)
}
func printResponse(resp *genai.GenerateContentResponse) {
for _, cand := range resp.Candidates {
for _, part := range cand.Content.Parts {
fmt.Println(part)
}
}
fmt.Println("---")
}
Example (config)
package main
import (
"context"
"fmt"
"log"
"cloud.google.com/go/vertexai/genai"
)
func main() {
ctx := context.Background()
const projectID = "YOUR PROJECT ID"
const location = "GCP LOCATION"
client, err := genai.NewClient(ctx, projectID, location)
if err != nil {
log.Fatal(err)
}
defer client.Close()
model := client.GenerativeModel("gemini-1.0-pro")
model.SetTemperature(0.9)
model.SetTopP(0.5)
model.SetTopK(20)
model.SetMaxOutputTokens(100)
model.SystemInstruction = &genai.Content{
Parts: []genai.Part{genai.Text("You are Yoda from Star Wars.")},
}
resp, err := model.GenerateContent(ctx, genai.Text("What is the average size of a swallow?"))
if err != nil {
log.Fatal(err)
}
printResponse(resp)
}
func printResponse(resp *genai.GenerateContentResponse) {
for _, cand := range resp.Candidates {
for _, part := range cand.Content.Parts {
fmt.Println(part)
}
}
fmt.Println("---")
}
func (*GenerativeModel) GenerateContentStream
func (m *GenerativeModel) GenerateContentStream(ctx context.Context, parts ...Part) *GenerateContentResponseIterator
GenerateContentStream returns an iterator that enumerates responses.
Example
package main
import (
"context"
"fmt"
"log"
"cloud.google.com/go/vertexai/genai"
"google.golang.org/api/iterator"
)
// Your GCP project
const projectID = "your-project"
// A GCP location like "us-central1"
const location = "some-gcp-location"
// A model name like "gemini-1.0-pro"
const model = "some-model"
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, projectID, location)
if err != nil {
log.Fatal(err)
}
defer client.Close()
model := client.GenerativeModel(model)
iter := model.GenerateContentStream(ctx, genai.Text("Tell me a story about a lumberjack and his giant ox. Keep it very short."))
for {
resp, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
printResponse(resp)
}
}
func printResponse(resp *genai.GenerateContentResponse) {
for _, cand := range resp.Candidates {
for _, part := range cand.Content.Parts {
fmt.Println(part)
}
}
fmt.Println("---")
}
func (*GenerativeModel) Name
func (m *GenerativeModel) Name() string
Name returns the name of the model.
func (*GenerativeModel) StartChat
func (m *GenerativeModel) StartChat() *ChatSession
StartChat starts a chat session.
HarmBlockMethod
type HarmBlockMethod int32
HarmBlockMethod determines how harm blocking is performed.
HarmBlockMethodUnspecified, HarmBlockMethodSeverity, HarmBlockMethodProbability
const (
// HarmBlockMethodUnspecified means the harm block method is unspecified.
HarmBlockMethodUnspecified HarmBlockMethod = 0
// HarmBlockMethodSeverity means the harm block method uses both probability and severity scores.
HarmBlockMethodSeverity HarmBlockMethod = 1
// HarmBlockMethodProbability means the harm block method uses the probability score.
HarmBlockMethodProbability HarmBlockMethod = 2
)
func (HarmBlockMethod) String
func (v HarmBlockMethod) String() string
HarmBlockThreshold
type HarmBlockThreshold int32
HarmBlockThreshold specifies probability-based threshold levels for blocking.
HarmBlockUnspecified, HarmBlockLowAndAbove, HarmBlockMediumAndAbove, HarmBlockOnlyHigh, HarmBlockNone
const (
// HarmBlockUnspecified means unspecified harm block threshold.
HarmBlockUnspecified HarmBlockThreshold = 0
// HarmBlockLowAndAbove means block low threshold and above (i.e. block more).
HarmBlockLowAndAbove HarmBlockThreshold = 1
// HarmBlockMediumAndAbove means block medium threshold and above.
HarmBlockMediumAndAbove HarmBlockThreshold = 2
// HarmBlockOnlyHigh means block only high threshold (i.e. block less).
HarmBlockOnlyHigh HarmBlockThreshold = 3
// HarmBlockNone means block none.
HarmBlockNone HarmBlockThreshold = 4
)
func (HarmBlockThreshold) String
func (v HarmBlockThreshold) String() string
HarmCategory
type HarmCategory int32
HarmCategory specifies the harm categories that will block content.
HarmCategoryUnspecified, HarmCategoryHateSpeech, HarmCategoryDangerousContent, HarmCategoryHarassment, HarmCategorySexuallyExplicit
const (
// HarmCategoryUnspecified means the harm category is unspecified.
HarmCategoryUnspecified HarmCategory = 0
// HarmCategoryHateSpeech means the harm category is hate speech.
HarmCategoryHateSpeech HarmCategory = 1
// HarmCategoryDangerousContent means the harm category is dangerous content.
HarmCategoryDangerousContent HarmCategory = 2
// HarmCategoryHarassment means the harm category is harassment.
HarmCategoryHarassment HarmCategory = 3
// HarmCategorySexuallyExplicit means the harm category is sexually explicit content.
HarmCategorySexuallyExplicit HarmCategory = 4
)
func (HarmCategory) String
func (v HarmCategory) String() string
HarmProbability
type HarmProbability int32
HarmProbability specifies harm probability levels in the content.
HarmProbabilityUnspecified, HarmProbabilityNegligible, HarmProbabilityLow, HarmProbabilityMedium, HarmProbabilityHigh
const (
// HarmProbabilityUnspecified means harm probability unspecified.
HarmProbabilityUnspecified HarmProbability = 0
// HarmProbabilityNegligible means negligible level of harm.
HarmProbabilityNegligible HarmProbability = 1
// HarmProbabilityLow means low level of harm.
HarmProbabilityLow HarmProbability = 2
// HarmProbabilityMedium means medium level of harm.
HarmProbabilityMedium HarmProbability = 3
// HarmProbabilityHigh means high level of harm.
HarmProbabilityHigh HarmProbability = 4
)
func (HarmProbability) String
func (v HarmProbability) String() string
HarmSeverity
type HarmSeverity int32
HarmSeverity specifies harm severity levels.
HarmSeverityUnspecified, HarmSeverityNegligible, HarmSeverityLow, HarmSeverityMedium, HarmSeverityHigh
const (
// HarmSeverityUnspecified means harm severity unspecified.
HarmSeverityUnspecified HarmSeverity = 0
// HarmSeverityNegligible means negligible level of harm severity.
HarmSeverityNegligible HarmSeverity = 1
// HarmSeverityLow means low level of harm severity.
HarmSeverityLow HarmSeverity = 2
// HarmSeverityMedium means medium level of harm severity.
HarmSeverityMedium HarmSeverity = 3
// HarmSeverityHigh means high level of harm severity.
HarmSeverityHigh HarmSeverity = 4
)
func (HarmSeverity) String
func (v HarmSeverity) String() string
Part
type Part interface {
// contains filtered or unexported methods
}
A Part is either a Text, a Blob, or a FileData.
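Response content can also carry other part types, such as [FunctionCall]; the Tool example below type-asserts one. A brief sketch, assuming cand is a *genai.Candidate from a response, of distinguishing parts with a type switch:
for _, p := range cand.Content.Parts {
	switch v := p.(type) {
	case genai.Text:
		fmt.Println("text:", v)
	case genai.FunctionCall:
		fmt.Println("function call:", v.Name, v.Args)
	default:
		fmt.Printf("other part type: %T\n", v)
	}
}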
PromptFeedback
type PromptFeedback struct {
// Output only. Blocked reason.
BlockReason BlockedReason
// Output only. Safety ratings.
SafetyRatings []*SafetyRating
// Output only. A readable block reason message.
BlockReasonMessage string
}
PromptFeedback contains content filter results for a prompt sent in the request.
SafetyRating
type SafetyRating struct {
// Output only. Harm category.
Category HarmCategory
// Output only. Harm probability levels in the content.
Probability HarmProbability
// Output only. Harm probability score.
ProbabilityScore float32
// Output only. Harm severity levels in the content.
Severity HarmSeverity
// Output only. Harm severity score.
SeverityScore float32
// Output only. Indicates whether the content was filtered out because of this
// rating.
Blocked bool
}
SafetyRating is the safety rating corresponding to the generated content.
SafetySetting
type SafetySetting struct {
// Required. Harm category.
Category HarmCategory
// Required. The harm block threshold.
Threshold HarmBlockThreshold
// Optional. Specify if the threshold is used for probability or severity
// score. If not specified, the threshold is used for probability score.
Method HarmBlockMethod
}
SafetySetting is a safety setting.
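A brief sketch of configuring safety settings on a model before generating content; the chosen categories and thresholds are only illustrative:
model.SafetySettings = []*genai.SafetySetting{
	{
		Category:  genai.HarmCategoryHateSpeech,
		Threshold: genai.HarmBlockLowAndAbove, // block more
	},
	{
		Category:  genai.HarmCategoryDangerousContent,
		Threshold: genai.HarmBlockOnlyHigh, // block less
		Method:    genai.HarmBlockMethodSeverity,
	},
}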
Schema
type Schema struct {
// Optional. The type of the data.
Type Type
// Optional. The format of the data.
// Supported formats:
//
// for NUMBER type: "float", "double"
// for INTEGER type: "int32", "int64"
// for STRING type: "email", "byte", etc
Format string
// Optional. The title of the Schema.
Title string
// Optional. The description of the data.
Description string
// Optional. Indicates if the value may be null.
Nullable bool
// Optional. SCHEMA FIELDS FOR TYPE ARRAY
// Schema of the elements of Type.ARRAY.
Items *Schema
// Optional. Minimum number of the elements for Type.ARRAY.
MinItems int64
// Optional. Maximum number of the elements for Type.ARRAY.
MaxItems int64
// Optional. Possible values of the element of Type.STRING with enum format.
// For example we can define an Enum Direction as :
// {type:STRING, format:enum, enum:["EAST", "NORTH", "SOUTH", "WEST"]}
Enum []string
// Optional. SCHEMA FIELDS FOR TYPE OBJECT
// Properties of Type.OBJECT.
Properties map[string]*Schema
// Optional. Required properties of Type.OBJECT.
Required []string
// Optional. Minimum number of the properties for Type.OBJECT.
MinProperties int64
// Optional. Maximum number of the properties for Type.OBJECT.
MaxProperties int64
// Optional. SCHEMA FIELDS FOR TYPE INTEGER and NUMBER
// Minimum value of the Type.INTEGER and Type.NUMBER
Minimum float64
// Optional. Maximum value of the Type.INTEGER and Type.NUMBER
Maximum float64
// Optional. SCHEMA FIELDS FOR TYPE STRING
// Minimum length of the Type.STRING
MinLength int64
// Optional. Maximum length of the Type.STRING
MaxLength int64
// Optional. Pattern of the Type.STRING to restrict a string to a regular
// expression.
Pattern string
}
Schema is used to define the format of input/output data. It represents a select subset of an OpenAPI 3.0 schema object. More fields may be added in the future as needed.
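A brief sketch of a Schema describing an array of strings, similar in spirit to the object schema built in the Tool example below; the description text is only illustrative:
listSchema := &genai.Schema{
	Type:        genai.TypeArray,
	Description: "A list of recipe names",
	Items:       &genai.Schema{Type: genai.TypeString},
}
Text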
type Text string
Text is a piece of text, such as a question or a phrase.
Tool
type Tool struct {
// Optional. Function tool type.
// One or more function declarations to be passed to the model along with the
// current user query. Model may decide to call a subset of these functions
// by populating [FunctionCall][content.part.function_call] in the response.
// User should provide a [FunctionResponse][content.part.function_response]
// for each function call in the next turn. Based on the function responses,
// Model will generate the final response back to the user.
// Maximum 64 function declarations can be provided.
FunctionDeclarations []*FunctionDeclaration
}
Tool details that the model may use to generate a response.
A Tool is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of the knowledge and scope of the model. A Tool object should contain exactly one type of tool (for example, FunctionDeclaration, Retrieval, or GoogleSearchRetrieval).
Example
package main
import (
"context"
"fmt"
"log"
"cloud.google.com/go/vertexai/genai"
)
// Your GCP project
const projectID = "your-project"
// A GCP location like "us-central1"
const location = "some-gcp-location"
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, projectID, location)
if err != nil {
log.Fatal(err)
}
defer client.Close()
currentWeather := func(city string) string {
switch city {
case "New York, NY":
return "cold"
case "Miami, FL":
return "warm"
default:
return "unknown"
}
}
// To use functions / tools, we have to first define a schema that describes
// the function to the model. The schema is similar to OpenAPI 3.0.
//
// In this example, we create a single function that provides the model with
// a weather forecast in a given location.
schema := &genai.Schema{
Type: genai.TypeObject,
Properties: map[string]*genai.Schema{
"location": {
Type: genai.TypeString,
Description: "The city and state, e.g. San Francisco, CA",
},
"unit": {
Type: genai.TypeString,
Enum: []string{"celsius", "fahrenheit"},
},
},
Required: []string{"location"},
}
weatherTool := &genai.Tool{
FunctionDeclarations: []*genai.FunctionDeclaration{{
Name: "CurrentWeather",
Description: "Get the current weather in a given location",
Parameters: schema,
}},
}
model := client.GenerativeModel("gemini-1.0-pro")
// Before initiating a conversation, we tell the model which tools it has
// at its disposal.
model.Tools = []*genai.Tool{weatherTool}
// For using tools, the chat mode is useful because it provides the required
// chat context. A model needs to have tools supplied to it in the chat
// history so it can use them in subsequent conversations.
//
// The flow of message expected here is:
//
// 1. We send a question to the model
// 2. The model recognizes that it needs to use a tool to answer the question,
// and returns a FunctionCall response asking to use the CurrentWeather
// tool.
// 3. We send a FunctionResponse message, simulating the return value of
// CurrentWeather for the model's query.
// 4. The model provides its text answer in response to this message.
session := model.StartChat()
res, err := session.SendMessage(ctx, genai.Text("What is the weather like in New York?"))
if err != nil {
log.Fatal(err)
}
part := res.Candidates[0].Content.Parts[0]
funcall, ok := part.(genai.FunctionCall)
if !ok {
log.Fatalf("expected FunctionCall: %v", part)
}
if funcall.Name != "CurrentWeather" {
log.Fatalf("expected CurrentWeather: %v", funcall.Name)
}
// Expect the model to pass a proper string "location" argument to the tool.
locArg, ok := funcall.Args["location"].(string)
if !ok {
log.Fatalf("expected string: %v", funcall.Args["location"])
}
weatherData := currentWeather(locArg)
res, err = session.SendMessage(ctx, genai.FunctionResponse{
Name: weatherTool.FunctionDeclarations[0].Name,
Response: map[string]any{
"weather": weatherData,
},
})
if err != nil {
log.Fatal(err)
}
printResponse(res)
}
func printResponse(resp *genai.GenerateContentResponse) {
for _, cand := range resp.Candidates {
for _, part := range cand.Content.Parts {
fmt.Println(part)
}
}
fmt.Println("---")
}
ToolConfig
type ToolConfig struct {
// Optional. Function calling config.
FunctionCallingConfig *FunctionCallingConfig
}
ToolConfig configures tools.
Type
type Type int32
Type contains the list of OpenAPI data types as defined by https://swagger.io/docs/specification/data-models/data-types/
TypeUnspecified, TypeString, TypeNumber, TypeInteger, TypeBoolean, TypeArray, TypeObject
const (
// TypeUnspecified means not specified, should not be used.
TypeUnspecified Type = 0
// TypeString means openAPI string type
TypeString Type = 1
// TypeNumber means openAPI number type
TypeNumber Type = 2
// TypeInteger means openAPI integer type
TypeInteger Type = 3
// TypeBoolean means openAPI boolean type
TypeBoolean Type = 4
// TypeArray means openAPI array type
TypeArray Type = 5
// TypeObject means openAPI object type
TypeObject Type = 6
)
func (Type) String
func (v Type) String() string
UsageMetadata
type UsageMetadata struct {
// Number of tokens in the request.
PromptTokenCount int32
// Number of tokens in the response(s).
CandidatesTokenCount int32
TotalTokenCount int32
}
UsageMetadata is usage metadata about the response(s).
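A brief sketch, assuming resp is a *genai.GenerateContentResponse from one of the examples above, of reading the usage metadata:
if um := resp.UsageMetadata; um != nil {
	fmt.Printf("prompt tokens: %d, candidate tokens: %d, total: %d\n",
		um.PromptTokenCount, um.CandidatesTokenCount, um.TotalTokenCount)
}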