Copy the request body and open the method reference page. The APIs Explorer panel opens on the right side of the page. You can use this tool to send requests. Paste the request body into the tool, complete any other required fields, and click Execute.
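For reference, a detectIntent request body for audio input has the following shape. This is a sketch based on the v2 API fields; the base64 audio value is a placeholder, and the encoding and sample rate assume 16-bit linear PCM audio sampled at 16000 Hz:

{
  "queryInput": {
    "audioConfig": {
      "audioEncoding": "AUDIO_ENCODING_LINEAR_16",
      "sampleRateHertz": 16000,
      "languageCode": "en-US"
    }
  },
  "inputAudio": "BASE64_ENCODED_AUDIO"
}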
You should receive a JSON response similar to the following:
{
  "responseId": "3c1e5a89-75b9-4c3f-b63d-4b1351dd5e32",
  "queryResult": {
    "queryText": "book a room",
    "action": "room.reservation",
    "parameters": {
      "time": "",
      "date": "",
      "guests": "",
      "duration": "",
      "location": ""
    },
    "fulfillmentText": "I can help with that. Where would you like to reserve a room?",
    "fulfillmentMessages": [
      {
        "text": {
          "text": [
            "I can help with that. Where would you like to reserve a room?"
          ]
        }
      }
    ],
    "intent": {
      "name": "projects/PROJECT_ID/agent/intents/e8f6a63e-73da-4a1a-8bfc-857183f71228",
      "displayName": "room.reservation"
    },
    "intentDetectionConfidence": 1,
    "diagnosticInfo": {},
    "languageCode": "en-us"
  }
}
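If you call the REST API directly, the audio content in the inputAudio field must be base64 encoded. A minimal Python sketch of that encoding step, assuming a local WAV file (the file name is a placeholder, not from this page):

import base64

# Read the raw audio bytes and base64-encode them for the JSON request body.
# "book_a_room.wav" is a placeholder path.
with open("book_a_room.wav", "rb") as audio_file:
    encoded_audio = base64.b64encode(audio_file.read()).decode("utf-8")
# Paste the resulting string into the "inputAudio" field.

The client library samples below send the same detect intent request from a local audio file. The libraries accept raw bytes, so no manual base64 step is needed.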
Go

// NOTE: the import block below was not in the original snippet; the
// dialogflowpb path assumes a recent version of the
// cloud.google.com/go/dialogflow module.
import (
	"context"
	"fmt"
	"os"

	dialogflow "cloud.google.com/go/dialogflow/apiv2"
	dialogflowpb "cloud.google.com/go/dialogflow/apiv2/dialogflowpb"
)

func DetectIntentAudio(projectID, sessionID, audioFile, languageCode string) (string, error) {
	ctx := context.Background()

	sessionClient, err := dialogflow.NewSessionsClient(ctx)
	if err != nil {
		return "", err
	}
	defer sessionClient.Close()

	if projectID == "" || sessionID == "" {
		return "", fmt.Errorf("detect.DetectIntentAudio empty project (%s) or session (%s)", projectID, sessionID)
	}

	sessionPath := fmt.Sprintf("projects/%s/agent/sessions/%s", projectID, sessionID)

	// In this example, we hard code the encoding and sample rate for simplicity.
	audioConfig := dialogflowpb.InputAudioConfig{
		AudioEncoding:   dialogflowpb.AudioEncoding_AUDIO_ENCODING_LINEAR_16,
		SampleRateHertz: 16000,
		LanguageCode:    languageCode,
	}

	queryAudioInput := dialogflowpb.QueryInput_AudioConfig{AudioConfig: &audioConfig}

	audioBytes, err := os.ReadFile(audioFile)
	if err != nil {
		return "", err
	}

	queryInput := dialogflowpb.QueryInput{Input: &queryAudioInput}
	request := dialogflowpb.DetectIntentRequest{
		Session:    sessionPath,
		QueryInput: &queryInput,
		InputAudio: audioBytes,
	}

	response, err := sessionClient.DetectIntent(ctx, &request)
	if err != nil {
		return "", err
	}

	queryResult := response.GetQueryResult()
	fulfillmentText := queryResult.GetFulfillmentText()
	return fulfillmentText, nil
}
Java

import com.google.api.gax.rpc.ApiException;
import com.google.cloud.dialogflow.v2.AudioEncoding;
import com.google.cloud.dialogflow.v2.DetectIntentRequest;
import com.google.cloud.dialogflow.v2.DetectIntentResponse;
import com.google.cloud.dialogflow.v2.InputAudioConfig;
import com.google.cloud.dialogflow.v2.QueryInput;
import com.google.cloud.dialogflow.v2.QueryResult;
import com.google.cloud.dialogflow.v2.SessionName;
import com.google.cloud.dialogflow.v2.SessionsClient;
import com.google.protobuf.ByteString;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

public class DetectIntentAudio {

  // DialogFlow API Detect Intent sample with audio files.
  public static QueryResult detectIntentAudio(
      String projectId, String audioFilePath, String sessionId, String languageCode)
      throws IOException, ApiException {
    // Instantiates a client
    try (SessionsClient sessionsClient = SessionsClient.create()) {
      // Set the session name using the sessionId (UUID) and projectID (my-project-id)
      SessionName session = SessionName.of(projectId, sessionId);
      System.out.println("Session Path: " + session.toString());

      // Note: hard coding audioEncoding and sampleRateHertz for simplicity.
      // Audio encoding of the audio content sent in the query request.
      AudioEncoding audioEncoding = AudioEncoding.AUDIO_ENCODING_LINEAR_16;
      int sampleRateHertz = 16000;

      // Instructs the speech recognizer how to process the audio content.
      InputAudioConfig inputAudioConfig =
          InputAudioConfig.newBuilder()
              .setAudioEncoding(audioEncoding) // audioEncoding = AudioEncoding.AUDIO_ENCODING_LINEAR_16
              .setLanguageCode(languageCode) // languageCode = "en-US"
              .setSampleRateHertz(sampleRateHertz) // sampleRateHertz = 16000
              .build();

      // Build the query with the InputAudioConfig
      QueryInput queryInput = QueryInput.newBuilder().setAudioConfig(inputAudioConfig).build();

      // Read the bytes from the audio file
      byte[] inputAudio = Files.readAllBytes(Paths.get(audioFilePath));

      // Build the DetectIntentRequest
      DetectIntentRequest request =
          DetectIntentRequest.newBuilder()
              .setSession(session.toString())
              .setQueryInput(queryInput)
              .setInputAudio(ByteString.copyFrom(inputAudio))
              .build();

      // Performs the detect intent request
      DetectIntentResponse response = sessionsClient.detectIntent(request);

      // Display the query result
      QueryResult queryResult = response.getQueryResult();
      System.out.println("====================");
      System.out.format("Query Text: '%s'\n", queryResult.getQueryText());
      System.out.format(
          "Detected Intent: %s (confidence: %f)\n",
          queryResult.getIntent().getDisplayName(), queryResult.getIntentDetectionConfidence());
      System.out.format(
          "Fulfillment Text: '%s'\n",
          queryResult.getFulfillmentMessagesCount() > 0
              ? queryResult.getFulfillmentMessages(0).getText()
              : "Triggered Default Fallback Intent");
      return queryResult;
    }
  }
}
Node.js

// This snippet runs inside an async function and assumes projectId,
// sessionId, filename, encoding, sampleRateHertz, and languageCode
// are already defined.
const fs = require('fs');
const util = require('util');
const {struct} = require('pb-util');
// Imports the Dialogflow library
const dialogflow = require('@google-cloud/dialogflow');

// Instantiates a session client
const sessionClient = new dialogflow.SessionsClient();

// The path to identify the agent that owns the created intent.
const sessionPath = sessionClient.projectAgentSessionPath(projectId, sessionId);

// Read the content of the audio file and send it as part of the request.
const readFile = util.promisify(fs.readFile);
const inputAudio = await readFile(filename);
const request = {
  session: sessionPath,
  queryInput: {
    audioConfig: {
      audioEncoding: encoding,
      sampleRateHertz: sampleRateHertz,
      languageCode: languageCode,
    },
  },
  inputAudio: inputAudio,
};

// Recognizes the speech in the audio and detects its intent.
const [response] = await sessionClient.detectIntent(request);

console.log('Detected intent:');
const result = response.queryResult;
// Instantiates a context client
const contextClient = new dialogflow.ContextsClient();

console.log(`  Query: ${result.queryText}`);
console.log(`  Response: ${result.fulfillmentText}`);
if (result.intent) {
  console.log(`  Intent: ${result.intent.displayName}`);
} else {
  console.log('  No intent matched.');
}
const parameters = JSON.stringify(struct.decode(result.parameters));
console.log(`  Parameters: ${parameters}`);
if (result.outputContexts && result.outputContexts.length) {
  console.log('  Output contexts:');
  result.outputContexts.forEach(context => {
    const contextId =
      contextClient.matchContextFromProjectAgentSessionContextName(
        context.name
      );
    const contextParameters = JSON.stringify(struct.decode(context.parameters));
    console.log(`    ${contextId}`);
    console.log(`      lifespan: ${context.lifespanCount}`);
    console.log(`      parameters: ${contextParameters}`);
  });
}
Python

def detect_intent_audio(project_id, session_id, audio_file_path, language_code):
    """Returns the result of detect intent with an audio file as input.

    Using the same `session_id` between requests allows continuation
    of the conversation."""
    from google.cloud import dialogflow

    session_client = dialogflow.SessionsClient()

    # Note: hard coding audio_encoding and sample_rate_hertz for simplicity.
    audio_encoding = dialogflow.AudioEncoding.AUDIO_ENCODING_LINEAR_16
    sample_rate_hertz = 16000

    session = session_client.session_path(project_id, session_id)
    print("Session path: {}\n".format(session))

    with open(audio_file_path, "rb") as audio_file:
        input_audio = audio_file.read()

    audio_config = dialogflow.InputAudioConfig(
        audio_encoding=audio_encoding,
        language_code=language_code,
        sample_rate_hertz=sample_rate_hertz,
    )
    query_input = dialogflow.QueryInput(audio_config=audio_config)

    request = dialogflow.DetectIntentRequest(
        session=session,
        query_input=query_input,
        input_audio=input_audio,
    )
    response = session_client.detect_intent(request=request)

    print("=" * 20)
    print("Query text: {}".format(response.query_result.query_text))
    print(
        "Detected intent: {} (confidence: {})\n".format(
            response.query_result.intent.display_name,
            response.query_result.intent_detection_confidence,
        )
    )
    print("Fulfillment text: {}\n".format(response.query_result.fulfillment_text))
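To try the Python sample end to end, you could call it with values like the following. This invocation is hypothetical; the project ID, session ID, and audio path are placeholders, and the audio file must match the hard-coded encoding (16-bit linear PCM at 16000 Hz):

detect_intent_audio(
    "my-project-id",    # Google Cloud project that owns the agent (placeholder)
    "my-session-id",    # any unique string; reuse it to continue the conversation
    "book_a_room.wav",  # placeholder path to a 16 kHz linear PCM audio file
    "en-US",
)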
[[["容易理解","easyToUnderstand","thumb-up"],["確實解決了我的問題","solvedMyProblem","thumb-up"],["其他","otherUp","thumb-up"]],[["難以理解","hardToUnderstand","thumb-down"],["資訊或程式碼範例有誤","incorrectInformationOrSampleCode","thumb-down"],["缺少我需要的資訊/範例","missingTheInformationSamplesINeed","thumb-down"],["翻譯問題","translationIssue","thumb-down"],["其他","otherDown","thumb-down"]],["上次更新時間:2025-09-04 (世界標準時間)。"],[[["\u003cp\u003eThis guide details how to send audio input to the Dialogflow API for intent detection, a process which includes the conversion of audio to text, also known as audio input, speech recognition, speech-to-text, or STT.\u003c/p\u003e\n"],["\u003cp\u003eBefore using this guide, it is recommended to have read the Dialogflow basics and have completed the setup steps, and this guide is applicable exclusively when interacting directly with the API rather than through integrations.\u003c/p\u003e\n"],["\u003cp\u003eTo begin, users must create a Dialogflow agent in the Dialogflow ES console, or use the provided "room-booking-agent.zip" file via the restore option to ensure the guide's examples work as intended.\u003c/p\u003e\n"],["\u003cp\u003eThe \u003ccode\u003edetectIntent\u003c/code\u003e method, available on the \u003ccode\u003eSessions\u003c/code\u003e type, is used to send the request, which requires a base64 encoded audio file and a JSON request body formatted with a Google Cloud Project ID and the encoded audio.\u003c/p\u003e\n"],["\u003cp\u003eThe response from a correctly formatted request will provide a \u003ccode\u003equeryResult\u003c/code\u003e which can include the \u003ccode\u003eaction\u003c/code\u003e, \u003ccode\u003efulfillmentMessages\u003c/code\u003e, and \u003ccode\u003eintent\u003c/code\u003e fields, which in the case of this example, will trigger a "room.reservation" action.\u003c/p\u003e\n"]]],[],null,[]]